mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-07 09:42:46 +01:00)
Merge branch 'dev' into feature/setup
# Conflicts:
#   salt/thehive/scripts/cortex_init
#   salt/thehive/scripts/hive_init
#   setup/so-functions
#   setup/so-whiptail
@@ -32,7 +32,7 @@ BROVERSION=ZEEK
 # EVALADVANCED=BASIC
 GRAFANA=1
 # HELIXAPIKEY=
-HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
 HNSENSOR=inherit
 HOSTNAME=standalone
 install_type=STANDALONE
@@ -40,8 +40,8 @@ install_type=STANDALONE
 # LSINPUTTHREADS=
 # LSPIPELINEBATCH=
 # LSPIPELINEWORKERS=
-MASTERADV=BASIC
-MASTERUPDATES=1
+MANAGERADV=BASIC
+MANAGERUPDATES=1
 # MDNS=
 # MGATEWAY=
 # MIP=
@@ -55,7 +55,7 @@ NIDS=Suricata
 # NODE_LS_HEAP_SIZE=
 NODESETUP=NODEBASIC
 NSMSETUP=BASIC
-NODEUPDATES=MASTER
+NODEUPDATES=MANAGER
 # OINKCODE=
 OSQUERY=1
 # PATCHSCHEDULEDAYS=
@@ -24,7 +24,7 @@ SOVERSION=$(cat ../VERSION)
 accept_salt_key_remote() {
 systemctl restart salt-minion

-echo "Accept the key remotely on the master" >> "$setup_log" 2>&1
+echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
 # Delete the key just in case.
 ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
 salt-call state.apply ca
@@ -43,14 +43,14 @@ add_admin_user() {

 }

-add_master_hostfile() {
+add_manager_hostfile() {

 [ -n "$TESTING" ] && return

-echo "Checking if I can resolve master. If not add to hosts file" >> "$setup_log" 2>&1
+echo "Checking if I can resolve manager. If not add to hosts file" >> "$setup_log" 2>&1
 # Pop up an input to get the IP address
 MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
-"Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+"Enter your Manager Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -60,7 +60,7 @@ addtotab_generate_templates() {

 local addtotab_path=$local_salt_dir/pillar/data

-for i in evaltab mastersearchtab mastertab nodestab sensorstab standalonetab; do
+for i in evaltab managersearchtab managertab nodestab sensorstab standalonetab; do
 printf '%s\n'\
 "$i:"\
 "" > "$addtotab_path"/$i.sls
@@ -87,11 +87,11 @@ so_add_user() {
 fi
 }

-add_socore_user_master() {
+add_socore_user_manager() {
 so_add_user "socore" "939" "939" "/opt/so" >> "$setup_log" 2>&1
 }

-add_soremote_user_master() {
+add_soremote_user_manager() {
 so_add_user "soremote" "947" "947" "/home/soremote" "$SOREMOTEPASS1" >> "$setup_log" 2>&1
 }

@@ -152,7 +152,7 @@ bro_logs_enabled() {
 "brologs:"\
 " enabled:" > "$brologs_pillar"

-if [ "$MASTERADV" = 'ADVANCED' ]; then
+if [ "$MANAGERADV" = 'ADVANCED' ]; then
 for BLOG in "${BLOGS[@]}"; do
 echo " - $BLOG" | tr -d '"' >> "$brologs_pillar"
 done
@@ -265,12 +265,12 @@ check_web_pass() {
 check_pass_match "$WEBPASSWD1" "$WEBPASSWD2" "WPMATCH"
 }

-clear_master() {
-# Clear out the old master public key in case this is a re-install.
-# This only happens if you re-install the master.
+clear_manager() {
+# Clear out the old manager public key in case this is a re-install.
+# This only happens if you re-install the manager.
 if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
 {
-echo "Clearing old master key";
+echo "Clearing old Salt master key";
 rm -f /etc/salt/pki/minion/minion_master.pub;
 systemctl -q restart salt-minion;
 } >> "$setup_log" 2>&1
@@ -360,7 +360,7 @@ configure_minion() {
 'helix')
 echo "master: $HOSTNAME" >> "$minion_config"
 ;;
-'master' | 'eval' | 'mastersearch' | 'standalone')
+'manager' | 'eval' | 'managersearch' | 'standalone')
 printf '%s\n'\
 "master: $HOSTNAME"\
 "mysql.host: '$MAINIP'"\
@@ -437,9 +437,9 @@ check_requirements() {
 fi
 }

-copy_master_config() {
+copy_salt_master_config() {

-# Copy the master config template to the proper directory
+# Copy the Salt master config template to the proper directory
 if [ "$setup_type" = 'iso' ]; then
 cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
 else
@@ -452,7 +452,7 @@ copy_master_config() {

 copy_minion_tmp_files() {
 case "$install_type" in
-'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE')
+'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE')
 echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
 cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
 if [ -d "$temp_install_dir"/salt ] ; then
@@ -461,12 +461,12 @@ copy_minion_tmp_files() {
 ;;
 *)
 {
-echo "scp pillar and salt files in $temp_install_dir to master $local_salt_dir";
+echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
 ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
 ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
 scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
 scp -prv -i /root/.ssh/so.key "$temp_install_dir"/salt/patch/os/schedules/* soremote@"$MSRV":/tmp/"$MINION_ID"/schedules;
-ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/master/files/add_minion.sh "$MINION_ID";
+ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
 } >> "$setup_log" 2>&1
 ;;
 esac
@@ -479,8 +479,8 @@ copy_ssh_key() {
 mkdir -p /root/.ssh
 ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
 chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
-echo "Copying the SSH key to the master"
-#Copy the key over to the master
+echo "Copying the SSH key to the manager"
+#Copy the key over to the manager
 ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
 }

@@ -703,7 +703,7 @@ docker_install() {

 else
 case "$install_type" in
-'MASTER' | 'EVAL')
+'MANAGER' | 'EVAL')
 apt-get update >> "$setup_log" 2>&1
 ;;
 *)
@@ -733,7 +733,7 @@ docker_registry() {

 echo "Setting up Docker Registry" >> "$setup_log" 2>&1
 mkdir -p /etc/docker >> "$setup_log" 2>&1
-# Make the host use the master docker registry
+# Make the host use the manager docker registry
 if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
 printf '%s\n'\
 "{"\
@@ -832,12 +832,24 @@ firewall_generate_templates() {

 cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1

-for i in analyst beats_endpoint sensor master minion osquery_endpoint search_node wazuh_endpoint; do
+for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1
 done

 }

+fleet_pillar() {
+
+local pillar_file="$temp_install_dir"/pillar/minions/"$MINION_ID".sls
+
+# Create the fleet pillar
+printf '%s\n'\
+"fleet:"\
+" mainip: $MAINIP"\
+" manager: $MSRV"\
+"" > "$pillar_file"
+}
+
 generate_passwords(){
 # Generate Random Passwords for Things
 MYSQLPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
@@ -871,7 +883,7 @@ got_root() {
 get_minion_type() {
 local minion_type
 case "$install_type" in
-'EVAL' | 'MASTERSEARCH' | 'MASTER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
+'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
 minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
 ;;
 'HELIXSENSOR')
@@ -904,13 +916,13 @@ install_cleanup() {

 }

-master_pillar() {
+manager_pillar() {

 local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

-# Create the master pillar
+# Create the manager pillar
 printf '%s\n'\
-"master:"\
+"manager:"\
 " mainip: $MAINIP"\
 " mainint: $MNIC"\
 " esheap: $ES_HEAP_SIZE"\
@@ -919,7 +931,7 @@ master_pillar() {
 " domainstats: 0" >> "$pillar_file"


-if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MASTERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
+if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MANAGERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
 printf '%s\n'\
 " mtu: $MTU" >> "$pillar_file"
 fi
@@ -949,6 +961,16 @@ master_pillar() {
 " playbook: $PLAYBOOK"\
 " url_base: $REDIRECTIT"\
 ""\
+"elasticsearch:"\
+" mainip: $MAINIP"\
+" mainint: $MNIC"\
+" esheap: $NODE_ES_HEAP_SIZE"\
+" esclustername: {{ grains.host }}"\
+" node_type: $NODETYPE"\
+" es_port: $node_es_port"\
+" log_size_limit: $log_size_limit"\
+" node_route_type: hot"\
+""\
 "logstash_settings:"\
 " ls_pipeline_batch_size: 125"\
 " ls_input_threads: 1"\
@@ -966,19 +988,19 @@ master_pillar() {
 cat "$pillar_file" >> "$setup_log" 2>&1
 }

-master_static() {
+manager_static() {
 local static_pillar="$local_salt_dir/pillar/static.sls"

 # Create a static file for global values
 printf '%s\n'\
 "static:"\
 " soversion: $SOVERSION"\
-" hnmaster: $HNMASTER"\
+" hnmanager: $HNMANAGER"\
 " ntpserver: $NTPSERVER"\
 " proxy: $PROXY"\
 " broversion: $BROVERSION"\
 " ids: $NIDS"\
-" masterip: $MAINIP"\
+" managerip: $MAINIP"\
 " hiveuser: $WEBUSER"\
 " hivepassword: $WEBPASSWD1"\
 " hivekey: $HIVEKEY"\
@@ -990,7 +1012,7 @@ master_static() {
 " cortexorguserkey: $CORTEXORGUSERKEY"\
 " grafanapassword: $WEBPASSWD1"\
 " fleet_custom_hostname: "\
-" fleet_master: False"\
+" fleet_manager: False"\
 " fleet_node: False"\
 " fleet_packages-timestamp: N/A"\
 " fleet_packages-version: 1"\
@@ -998,12 +1020,75 @@ master_static() {
 " fleet_ip: N/A"\
 " sensoronikey: $SENSORONIKEY"\
 " wazuh: $WAZUH"\
-" masterupdate: $MASTERUPDATES"\
+" managerupdate: $MANAGERUPDATES"\
 "strelka:"\
 " enabled: $STRELKA"\
 " rules: $STRELKARULES"\
 "curator:"\
 " hot_warm: False"\
 "elastic:"\
-" features: False" > "$static_pillar"
+" features: False"\
+"elasticsearch:"\
+" replicas: 0"\
+" true_cluster: False"\
+" true_cluster_name: so"\
+" discovery_nodes: 1"\
+" hot_warm_enabled: False"\
+" cluster_routing_allocation_disk.threshold_enabled: true"\
+" cluster_routing_allocation_disk_watermark_low: 95%"\
+" cluster_routing_allocation_disk_watermark_high: 98%"\
+" cluster_routing_allocation_disk_watermark_flood_stage: 98%"\
+" index_settings:"\
+" so-beats:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-firewall:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-flow:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-ids:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-import:"\
+" shards: 1"\
+" warm: 7"\
+" close: 73000"\
+" delete: 73001"\
+" so-osquery:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-ossec:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-strelka:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-syslog:"\
+" shards: 1"\
+" warm: 7"\
+" close: 30"\
+" delete: 365"\
+" so-zeek:"\
+" shards: 5"\
+" warm: 7"\
+" close: 365"\
+" delete: 45" > "$static_pillar"

 printf '%s\n' '----' >> "$setup_log" 2>&1
 cat "$static_pillar" >> "$setup_log" 2>&1
@@ -1055,15 +1140,10 @@ elasticsearch_pillar() {
 " node_type: $NODETYPE"\
 " es_port: $node_es_port"\
 " log_size_limit: $log_size_limit"\
-" cur_close_days: $CURCLOSEDAYS"\
-" route_type: hot"\
-" index_settings:"\
-" so-zeek:"\
-" shards: 5"\
-" replicas: 0"\
+" node_route_type: hot"\
 "" >> "$pillar_file"

-if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MASTERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
+if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MANAGERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
 printf '%s\n'\
 "logstash_settings:"\
 " ls_pipeline_batch_size: $LSPIPELINEBATCH"\
@@ -1151,11 +1231,11 @@ saltify() {
 set_progress_str 6 'Installing various dependencies'
 yum -y install wget nmap-ncat >> "$setup_log" 2>&1
 case "$install_type" in
-'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
+'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
 reserve_group_ids >> "$setup_log" 2>&1
 yum -y install epel-release >> "$setup_log" 2>&1
 yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
-# Download Ubuntu Keys in case master updates = 1
+# Download Ubuntu Keys in case manager updates = 1
 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
 wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
@@ -1166,7 +1246,7 @@ saltify() {
 systemctl enable salt-master >> "$setup_log" 2>&1
 ;;
 *)
-if [ "$MASTERUPDATES" = '1' ]; then
+if [ "$MANAGERUPDATES" = '1' ]; then
 {
 # Create the GPG Public Key for the Salt Repo
 cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;
@@ -1222,7 +1302,7 @@ saltify() {
 'FLEET')
 if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
 ;;
-'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
+'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
 if [ "$OSVER" != "xenial" ]; then local py_ver_url_path="/py3"; else local py_ver_url_path="/apt"; fi

 # Add saltstack repo(s)
@@ -1252,9 +1332,9 @@ saltify() {
 apt-mark hold salt-master >> "$setup_log" 2>&1
 ;;
 *)
-# Copy down the gpg keys and install them from the master
+# Copy down the gpg keys and install them from the manager
 mkdir "$temp_install_dir"/gpg >> "$setup_log" 2>&1
-echo "scp the gpg keys and install them from the master" >> "$setup_log" 2>&1
+echo "scp the gpg keys and install them from the manager" >> "$setup_log" 2>&1
 scp -v -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/gpg/* "$temp_install_dir"/gpg >> "$setup_log" 2>&1
 echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
 apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
@@ -1280,7 +1360,7 @@ saltify() {
 salt_checkin() {

 case "$install_type" in
-'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE') # Fix Mine usage
+'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage
 {
 echo "Building Certificate Authority";
 salt-call state.apply ca;
@@ -1348,7 +1428,7 @@ setup_salt_master_dirs() {
 cp -R ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
 fi

-echo "Chown the salt dirs on the master for socore" >> "$setup_log" 2>&1
+echo "Chown the salt dirs on the manager for socore" >> "$setup_log" 2>&1
 chown -R socore:socore /opt/so
 }

@@ -1403,7 +1483,7 @@ sensor_pillar() {
 " brobpf:"\
 " pcapbpf:"\
 " nidsbpf:"\
-" master: $MSRV"\
+" manager: $MSRV"\
 " mtu: $MTU"\
 " uniqueid: $(date '+%s')" >> "$pillar_file"
 if [ "$HNSENSOR" != 'inherit' ]; then
@@ -1449,7 +1529,7 @@ set_hostname() {

 set_hostname_iso

-if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
 if ! getent hosts "$MSRV"; then
 echo "$MSRVIP $MSRV" >> /etc/hosts
 fi
@@ -1476,13 +1556,13 @@ set_initial_firewall_policy() {
 if [ -f $default_salt_dir/salt/common/tools/sbin/so-firewall ]; then chmod +x $default_salt_dir/salt/common/tools/sbin/so-firewall; fi

 case "$install_type" in
-'MASTER')
-$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+'MANAGER')
+$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP"
-$default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+$default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
 ;;
-'EVAL' | 'MASTERSEARCH' | 'STANDALONE')
-$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+'EVAL' | 'MANAGERSEARCH' | 'STANDALONE')
+$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
@@ -1490,8 +1570,8 @@ set_initial_firewall_policy() {
 'EVAL')
 $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True
 ;;
-'MASTERSEARCH')
-$default_salt_dir/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+'MANAGERSEARCH')
+$default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
 ;;
 'STANDALONE')
 $default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
@@ -1499,7 +1579,7 @@ set_initial_firewall_policy() {
 esac
 ;;
 'HELIXSENSOR')
-$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
 ;;
@@ -1558,7 +1638,7 @@ set_management_interface() {
 set_node_type() {

 case "$install_type" in
-'SEARCHNODE' | 'EVAL' | 'MASTERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
+'SEARCHNODE' | 'EVAL' | 'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
 NODETYPE='search'
 ;;
 'HOTNODE')
@@ -1571,13 +1651,13 @@ set_node_type() {
 }

 set_updates() {
-if [ "$MASTERUPDATES" = '1' ]; then
+if [ "$MANAGERUPDATES" = '1' ]; then
 if [ "$OS" = 'centos' ]; then
 if ! grep -q "$MSRV" /etc/yum.conf; then
 echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
 fi
 else
-# Set it up so the updates roll through the master
+# Set it up so the updates roll through the manager
 printf '%s\n'\
 "Acquire::http::Proxy \"http://$MSRV:3142\";"\
 "Acquire::https::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
@@ -1598,7 +1678,7 @@ update_sudoers() {
 echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
 echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/common/tools/sbin/so-firewall" | tee -a /etc/sudoers
 echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
-echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
+echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/manager/files/add_minion.sh" | tee -a /etc/sudoers
 else
 echo "User soremote already granted sudo privileges" >> "$setup_log" 2>&1
 fi
@@ -1614,7 +1694,7 @@ update_packages() {
 }

 use_turbo_proxy() {
-if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
 echo "turbo is not supported on this install type" >> $setup_log 2>&1
 return
 fi
@@ -1638,7 +1718,7 @@ ls_heapsize() {
 fi

 case "$install_type" in
-'MASTERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
+'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
 LS_HEAP_SIZE='1000m'
 ;;
 'EVAL')
@@ -1650,7 +1730,7 @@ ls_heapsize() {
 esac
 export LS_HEAP_SIZE

-if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
 NODE_LS_HEAP_SIZE=LS_HEAP_SIZE
 export NODE_LS_HEAP_SIZE
 fi
@@ -1672,7 +1752,7 @@ es_heapsize() {
 fi
 export ES_HEAP_SIZE

-if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
 NODE_ES_HEAP_SIZE=ES_HEAP_SIZE
 export NODE_ES_HEAP_SIZE
 fi
setup/so-setup
@@ -129,21 +129,21 @@ whiptail_install_type

 if [ "$install_type" = 'EVAL' ]; then
 is_node=true
-is_master=true
+is_manager=true
 is_sensor=true
 is_eval=true
 elif [ "$install_type" = 'STANDALONE' ]; then
-is_master=true
-is_distmaster=true
+is_manager=true
+is_distmanager=true
 is_node=true
 is_sensor=true
-elif [ "$install_type" = 'MASTERSEARCH' ]; then
-is_master=true
-is_distmaster=true
+elif [ "$install_type" = 'MANAGERSEARCH' ]; then
+is_manager=true
+is_distmanager=true
 is_node=true
-elif [ "$install_type" = 'MASTER' ]; then
-is_master=true
-is_distmaster=true
+elif [ "$install_type" = 'MANAGER' ]; then
+is_manager=true
+is_distmanager=true
 elif [ "$install_type" = 'SENSOR' ]; then
 is_sensor=true
 is_minion=true
@@ -169,7 +169,7 @@ elif [[ $is_fleet_standalone ]]; then
 check_requirements "dist" "fleet"
 elif [[ $is_sensor && ! $is_eval ]]; then
 check_requirements "dist" "sensor"
-elif [[ $is_distmaster || $is_minion ]]; then
+elif [[ $is_distmanager || $is_minion ]]; then
 check_requirements "dist"
 fi

@@ -214,15 +214,15 @@ if [[ $is_helix ]]; then
 RULESETUP=ETOPEN
 NSMSETUP=BASIC
 HNSENSOR=inherit
-MASTERUPDATES=0
+MANAGERUPDATES=0
 fi

-if [[ $is_helix || ( $is_master && $is_node ) ]]; then
+if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
 RULESETUP=ETOPEN
 NSMSETUP=BASIC
 fi

-if [[ $is_master && $is_node ]]; then
+if [[ $is_manager && $is_node ]]; then
 LSPIPELINEWORKERS=1
 LSPIPELINEBATCH=125
 LSINPUTTHREADS=1
@@ -241,16 +241,16 @@ if [[ $is_helix || $is_sensor ]]; then
 calculate_useable_cores
 fi

-if [[ $is_helix || $is_master ]]; then
-whiptail_homenet_master
+if [[ $is_helix || $is_manager ]]; then
+whiptail_homenet_manager
 fi

-if [[ $is_helix || $is_master || $is_node ]]; then
+if [[ $is_helix || $is_manager || $is_node ]]; then
 set_base_heapsizes
 fi

-if [[ $is_master && ! $is_eval ]]; then
-whiptail_master_adv
+if [[ $is_manager && ! $is_eval ]]; then
+whiptail_manager_adv
 whiptail_bro_version
 whiptail_nids
 whiptail_rule_setup
@@ -259,12 +259,12 @@ if [[ $is_master && ! $is_eval ]]; then
 whiptail_oinkcode
 fi

-if [ "$MASTERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
-whiptail_master_adv_service_brologs
+if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
+whiptail_manager_adv_service_brologs
 fi
 fi

-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
 whiptail_components_adv_warning
 whiptail_enable_components
 if [[ $STRELKA == 1 ]]; then
@@ -274,10 +274,10 @@ if [[ $is_master ]]; then
 get_redirect
 fi

-if [[ $is_distmaster || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
-whiptail_master_updates
-if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
-whiptail_master_updates_warning
+if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
+whiptail_manager_updates
+if [[ $setup_type == 'network' && $MANAGERUPDATES == 1 ]]; then
+whiptail_manager_updates_warning
 fi
 fi

@@ -285,7 +285,7 @@ if [[ $is_minion ]]; then
 whiptail_management_server
 fi

-if [[ $is_distmaster ]]; then
+if [[ $is_distmanager ]]; then
 collect_soremote_inputs
 fi

@@ -351,32 +351,32 @@ fi
 {
 set_hostname;
 set_version;
-clear_master;
+clear_manager;
 } >> $setup_log 2>&1


-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
 {
 generate_passwords;
 secrets_pillar;
-add_socore_user_master;
+add_socore_user_manager;
 } >> $setup_log 2>&1
 fi

-if [[ $is_master && ! $is_eval ]]; then
-add_soremote_user_master >> $setup_log 2>&1
+if [[ $is_manager && ! $is_eval ]]; then
+add_soremote_user_manager >> $setup_log 2>&1
 fi

 set_main_ip >> $setup_log 2>&1

 host_pillar >> $setup_log 2>&1

 if [[ $is_minion ]]; then
 set_updates >> $setup_log 2>&1
 copy_ssh_key >> $setup_log 2>&1
 fi

 if [[ "$OSQUERY" = 1 ]]; then
 host_pillar >> $setup_log 2>&1
 fi


 # Begin install
 {
@@ -410,12 +410,12 @@ fi
 set_progress_str 9 'Initializing Salt minion'
 configure_minion "$minion_type" >> $setup_log 2>&1

-if [[ $is_master || $is_helix ]]; then
+if [[ $is_manager || $is_helix ]]; then
 set_progress_str 10 'Configuring Salt master'
 {
 create_local_directories;
 addtotab_generate_templates;
-copy_master_config;
+copy_salt_master_config;
 setup_salt_master_dirs;
 firewall_generate_templates;
 } >> $setup_log 2>&1
@@ -423,11 +423,11 @@ fi
 set_progress_str 11 'Updating sudoers file for soremote user'
 update_sudoers >> $setup_log 2>&1

-set_progress_str 12 'Generating master static pillar'
-master_static >> $setup_log 2>&1
+set_progress_str 12 'Generating manager static pillar'
+manager_static >> $setup_log 2>&1

-set_progress_str 13 'Generating master pillar'
-master_pillar >> $setup_log 2>&1
+set_progress_str 13 'Generating manager pillar'
+manager_pillar >> $setup_log 2>&1
 fi


@@ -448,22 +448,22 @@ fi
 fi

 if [[ $is_minion ]]; then
-set_progress_str 20 'Accepting Salt key on master'
+set_progress_str 20 'Accepting Salt key on manager'
 accept_salt_key_remote >> $setup_log 2>&1
 fi

-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
 set_progress_str 20 'Accepting Salt key'
 salt-key -ya "$MINION_ID" >> $setup_log 2>&1
 fi

-set_progress_str 21 'Copying minion pillars to master'
+set_progress_str 21 'Copying minion pillars to manager'
 copy_minion_tmp_files >> $setup_log 2>&1

 set_progress_str 22 'Generating CA and checking in'
 salt_checkin >> $setup_log 2>&1

-if [[ $is_master || $is_helix ]]; then
+if [[ $is_manager || $is_helix ]]; then
 set_progress_str 25 'Configuring firewall'
 set_initial_firewall_policy >> $setup_log 2>&1

@@ -476,14 +476,14 @@ fi
 salt-call state.apply -l info registry >> $setup_log 2>&1
 docker_seed_registry 2>> "$setup_log" # ~ 60% when finished

-set_progress_str 60 "$(print_salt_state_apply 'master')"
-salt-call state.apply -l info master >> $setup_log 2>&1
+set_progress_str 60 "$(print_salt_state_apply 'manager')"
+salt-call state.apply -l info manager >> $setup_log 2>&1

 set_progress_str 61 "$(print_salt_state_apply 'idstools')"
 salt-call state.apply -l info idstools >> $setup_log 2>&1

-set_progress_str 61 "$(print_salt_state_apply 'suricata.master')"
-salt-call state.apply -l info suricata.master >> $setup_log 2>&1
+set_progress_str 61 "$(print_salt_state_apply 'suricata.manager')"
+salt-call state.apply -l info suricata.manager >> $setup_log 2>&1

 fi

@@ -501,7 +501,7 @@ fi
 set_progress_str 64 "$(print_salt_state_apply 'nginx')"
 salt-call state.apply -l info nginx >> $setup_log 2>&1

-if [[ $is_master || $is_node ]]; then
+if [[ $is_manager || $is_node ]]; then
 set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')"
 salt-call state.apply -l info elasticsearch >> $setup_log 2>&1
 fi
@@ -522,7 +522,7 @@ fi
 salt-call state.apply -l info curator >> $setup_log 2>&1
 fi

-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
 set_progress_str 69 "$(print_salt_state_apply 'soc')"
 salt-call state.apply -l info soc >> $setup_log 2>&1

@@ -586,12 +586,12 @@ fi
 fi
 fi

-if [[ $is_master || $is_helix ]]; then
+if [[ $is_manager || $is_helix ]]; then
 set_progress_str 81 "$(print_salt_state_apply 'utility')"
 salt-call state.apply -l info utility >> $setup_log 2>&1
 fi

-if [[ ( $is_helix || $is_master || $is_node ) && ! $is_eval ]]; then
+if [[ ( $is_helix || $is_manager || $is_node ) && ! $is_eval ]]; then
 set_progress_str 82 "$(print_salt_state_apply 'logstash')"
 salt-call state.apply -l info logstash >> $setup_log 2>&1

@@ -603,7 +603,7 @@ fi
 filter_unused_nics >> $setup_log 2>&1
 network_setup >> $setup_log 2>&1

-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
 set_progress_str 87 'Adding user to SOC'
 add_web_user >> $setup_log 2>&1
 fi
@@ -406,24 +406,24 @@ whiptail_helix_apikey() {

 }

-whiptail_homenet_master() {
+whiptail_homenet_manager() {

 [ -n "$TESTING" ] && return

-HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
+HNMANAGER=$(whiptail --title "Security Onion Setup" --inputbox \
 "Enter your HOME_NET separated by ," 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus

-export HNMASTER
+export HNMANAGER
 }

 whiptail_homenet_sensor() {

 [ -n "$TESTING" ] && return

-# Ask to inherit from master
-whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 75
+# Ask to inherit from manager
+whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Manager?" 8 75

 local exitstatus=$?

@@ -458,10 +458,10 @@ whiptail_install_type() {
 if [[ $install_type == "DISTRIBUTED" ]]; then
 install_type=$(whiptail --title "Security Onion Setup" --radiolist \
 "Choose distributed node type:" 13 60 6 \
-"MASTER" "Start a new grid " ON \
+"MANAGER" "Start a new grid " ON \
 "SENSOR" "Create a forward only sensor " OFF \
 "SEARCHNODE" "Add a search node with parsing " OFF \
-"MASTERSEARCH" "Master + search node " OFF \
+"MANAGERSEARCH" "Manager + search node " OFF \
 "FLEET" "Dedicated Fleet Osquery Node " OFF \
 "HEAVYNODE" "Sensor + Search Node " OFF \
 3>&1 1>&2 2>&3
@@ -599,26 +599,26 @@ whiptail_management_server() {
 [ -n "$TESTING" ] && return

 MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
-"Enter your Master Server hostname. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)
+"Enter your Manager Server hostname. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus

 if ! getent hosts "$MSRV"; then
-add_master_hostfile
+add_manager_hostfile
 fi

 }

-# Ask if you want to do advanced setup of the Master
-whiptail_master_adv() {
+# Ask if you want to do advanced setup of the Manager
+whiptail_manager_adv() {

 [ -n "$TESTING" ] && return

-MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
-"Choose what type of master install:" 20 75 4 \
-"BASIC" "Install master with recommended settings" ON \
-"ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+MANAGERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+"Choose which type of manager to install:" 20 75 4 \
+"BASIC" "Install manager with recommended settings" ON \
+"ADVANCED" "Do additional configuration to the manager" OFF 3>&1 1>&2 2>&3 )

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -626,7 +626,7 @@ whiptail_master_adv() {
 }

 # Ask which additional components to install
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {

 [ -n "$TESTING" ] && return

@@ -791,7 +791,7 @@ whiptail_patch_name_new_schedule() {
 [ -n "$TESTING" ] && return

 PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
-"What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
+"What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -799,7 +799,7 @@ whiptail_patch_name_new_schedule() {
 while [[ -z "$PATCHSCHEDULENAME" ]]; do
 whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 75
 PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
-"What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
+"What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
 done
@@ -850,7 +850,7 @@ whiptail_patch_schedule_import() {

 unset PATCHSCHEDULENAME
 PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
-"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
+"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -858,7 +858,7 @@ whiptail_patch_schedule_import() {
 while [[ -z "$PATCHSCHEDULENAME" ]]; do
 whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 75
 PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
-"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
+"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -944,7 +944,7 @@ whiptail_rule_setup() {

 # Get pulled pork info
 RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
-"Which IDS ruleset would you like to use?\n\nThis master server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the master server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
+"Which IDS ruleset would you like to use?\n\nThis manager server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the manager server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
 "ETOPEN" "Emerging Threats Open" ON \
 "ETPRO" "Emerging Threats PRO" OFF \
 "TALOSET" "Snort Subscriber (Talos) and ET NoGPL rulesets" OFF \
@@ -1133,34 +1133,34 @@ whiptail_suricata_pins() {

 }

-whiptail_master_updates() {
+whiptail_manager_updates() {

 [ -n "$TESTING" ] && return

 local update_string
 update_string=$(whiptail --title "Security Onion Setup" --radiolist \
 "How would you like to download OS package updates for your grid?:" 20 75 4 \
-"MASTER" "Master node is proxy for updates" ON \
+"MANAGER" "Manager node is proxy for updates" ON \
 "OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus

 case "$update_string" in
-'MASTER')
-MASTERUPDATES='1'
+'MANAGER')
+export MANAGERUPDATES='1'
 ;;
 *)
-MASTERUPDATES='0'
+export MANAGERUPDATES='0'
 ;;
 esac

 }

-whiptail_master_updates_warning() {
+whiptail_manager_updates_warning() {
 [ -n "$TESTING" ] && return

 whiptail --title "Security Onion Setup"\
---msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+--msgbox "Updating through the manager node requires the manager to have internet access, press ENTER to continue."\
 8 75

 local exitstatus=$?
@@ -1173,7 +1173,7 @@ whiptail_node_updates() {

 NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
 "How would you like to download OS package updates for your grid?:" 20 75 4 \
-"MASTER" "Master node is proxy for updates." ON \
+"MANAGER" "Manager node is proxy for updates." ON \
 "OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )

 local exitstatus=$?