mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
merge with 2.4dev and fix conflict
This commit is contained in:
@@ -37,7 +37,6 @@
|
||||
'soc',
|
||||
'kratos',
|
||||
'elastic-fleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
@@ -123,7 +122,6 @@
|
||||
'soc',
|
||||
'kratos',
|
||||
'elastic-fleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
@@ -144,7 +142,6 @@
|
||||
'soc',
|
||||
'kratos',
|
||||
'elastic-fleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'manager',
|
||||
'idstools',
|
||||
@@ -175,7 +172,6 @@
|
||||
'soc',
|
||||
'kratos',
|
||||
'elastic-fleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
|
||||
@@ -225,31 +225,17 @@ init_monitor() {
|
||||
}
|
||||
|
||||
is_manager_node() {
  # Check to see if this is a manager node.
  # Returns 0 for any single-node grid (eval/standalone/import) or when the
  # grain role is manager, managersearch, or helix; returns 1 otherwise.
  role=$(lookup_role)
  is_single_node_grid && return 0
  # Quote $role: an empty/unset role would otherwise break the test syntax.
  [ "$role" == 'manager' ] && return 0
  [ "$role" == 'managersearch' ] && return 0
  [ "$role" == 'helix' ] && return 0
  return 1
  # NOTE: removed unreachable legacy grep of /etc/salt/grains that sat after
  # the final return (dead code left over from a merge).
}
|
||||
|
||||
is_sensor_node() {
  # Check to see if this is a sensor (forward) node.
  # Returns 0 for any single-node grid (eval/standalone/import) or when the
  # grain role is sensor, heavynode, or helix; returns 1 otherwise.
  role=$(lookup_role)
  is_single_node_grid && return 0
  # Quote $role: an empty/unset role would otherwise break the test syntax.
  [ "$role" == 'sensor' ] && return 0
  [ "$role" == 'heavynode' ] && return 0
  [ "$role" == 'helix' ] && return 0
  return 1
  # NOTE: removed unreachable legacy grep of /etc/salt/grains that sat after
  # the final return (dead code left over from a merge).
}
|
||||
|
||||
is_single_node_grid() {
  # Returns 0 when this install is a single-node grid, i.e. the grain role
  # is eval, standalone, or import; returns 1 otherwise.
  role=$(lookup_role)
  # Quote $role: an empty/unset role would otherwise break the test syntax.
  [ "$role" == 'eval' ] && return 0
  [ "$role" == 'standalone' ] && return 0
  [ "$role" == 'import' ] && return 0
  return 1
  # NOTE: removed unreachable legacy grep of /etc/salt/grains that sat after
  # the final return (dead code left over from a merge).
}
|
||||
|
||||
lookup_bond_interfaces() {
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
|
||||
|
||||
# Exit if an error occurs. The next highstate will retry.
|
||||
set -e
|
||||
|
||||
folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
|
||||
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
|
||||
|
||||
|
||||
@@ -44,7 +44,6 @@ container_list() {
|
||||
"so-elastalert"
|
||||
"so-elastic-agent"
|
||||
"so-elastic-agent-builder"
|
||||
"so-elastic-fleet-package-registry"
|
||||
"so-elasticsearch"
|
||||
"so-filebeat"
|
||||
"so-grafana"
|
||||
|
||||
@@ -9,7 +9,11 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
|
||||
salt-call state.apply playbook.db_init,playbook
|
||||
|
||||
/usr/sbin/so-soctopus-restart
|
||||
|
||||
salt-call state.apply playbook,playbook.automation_user_create
|
||||
|
||||
/usr/sbin/so-soctopus-restart
|
||||
|
||||
|
||||
12
salt/common/tools/sbin/so-sensoroni-restart
Executable file
12
salt/common/tools/sbin/so-sensoroni-restart
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Restart the sensoroni container via the shared so-restart helper, forwarding
# an optional first argument through to it.

. /usr/sbin/so-common

# ${1:+"$1"} expands to the quoted first argument only when one was supplied,
# so a bare invocation still passes zero arguments (a plain unquoted $1 would
# word-split an argument containing spaces).
/usr/sbin/so-restart sensoroni ${1:+"$1"}
|
||||
12
salt/common/tools/sbin/so-sensoroni-start
Executable file
12
salt/common/tools/sbin/so-sensoroni-start
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Start the sensoroni container via the shared so-start helper, forwarding
# an optional first argument through to it.

. /usr/sbin/so-common

# ${1:+"$1"} expands to the quoted first argument only when one was supplied,
# so a bare invocation still passes zero arguments (a plain unquoted $1 would
# word-split an argument containing spaces).
/usr/sbin/so-start sensoroni ${1:+"$1"}
|
||||
12
salt/common/tools/sbin/so-sensoroni-stop
Executable file
12
salt/common/tools/sbin/so-sensoroni-stop
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Stop the sensoroni container via the shared so-stop helper, forwarding
# an optional first argument through to it.

. /usr/sbin/so-common

# ${1:+"$1"} expands to the quoted first argument only when one was supplied,
# so a bare invocation still passes zero arguments (a plain unquoted $1 would
# word-split an argument containing spaces).
/usr/sbin/so-stop sensoroni ${1:+"$1"}
|
||||
@@ -7,7 +7,6 @@
|
||||
|
||||
{% do KIBANACONFIG.kibana.config.elasticsearch.update({'username': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user'), 'password': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass')}) %}
|
||||
|
||||
{% do KIBANACONFIG.kibana.config.xpack.fleet.update({'registryUrl': 'http://' ~ GLOBALS.manager_ip ~ ':8080'}) %}
|
||||
|
||||
{% if salt['pillar.get']('kibana:secrets') %}
|
||||
{% do KIBANACONFIG.kibana.config.xpack.update({'encryptedSavedObjects': {'encryptionKey': pillar['kibana']['secrets']['encryptedSavedObjects']['encryptionKey']}}) %}
|
||||
|
||||
@@ -5,6 +5,8 @@ kibana:
|
||||
name: kibana
|
||||
host: "0.0.0.0"
|
||||
basePath: /kibana
|
||||
publicBaseUrl: https://{{salt['pillar.get']('global:url_base')}}/kibana
|
||||
rewriteBasePath: false
|
||||
elasticsearch:
|
||||
ssl:
|
||||
verificationMode: none
|
||||
@@ -31,7 +33,6 @@ kibana:
|
||||
kibanaServer:
|
||||
hostname: localhost
|
||||
fleet:
|
||||
registryUrl: ""
|
||||
packages:
|
||||
- name: fleet_server
|
||||
version: latest
|
||||
|
||||
@@ -2,8 +2,6 @@
|
||||
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%}
|
||||
# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %}
|
||||
|
||||
set -e
|
||||
|
||||
local_salt_dir=/opt/so/saltstack/local
|
||||
|
||||
try_count=6
|
||||
|
||||
@@ -191,7 +191,6 @@ base:
|
||||
{%- if REDIS %}
|
||||
- redis
|
||||
{%- endif %}
|
||||
- elastic-fleet-package-registry
|
||||
{%- if KIBANA %}
|
||||
- kibana.so_savedobjects_defaults
|
||||
{%- endif %}
|
||||
|
||||
@@ -1943,7 +1943,7 @@ securityonion_repo() {
|
||||
# if the package is updated when the update_packages function is called
|
||||
logCmd "yum -v -y update centos-release"
|
||||
info "Backing up the .repo files that were added by the centos-release package."
|
||||
logCmd "find /etc/yum.repos.d/ -type f -not -name 'securityonion*repo' -exec mv -bvf {} /root/oldrepos/ \;"
|
||||
logCmd "mv -bvf /etc/yum.repos.d/CentOS* /root/oldrepos/"
|
||||
logCmd "yum repolist all"
|
||||
fi
|
||||
}
|
||||
@@ -2380,3 +2380,15 @@ wait_for_file() {
|
||||
wait_for_salt_minion() {
  # Poll (60 attempts, 5s apart) until the salt-minion journal reports the
  # service is ready to receive requests; abort setup entirely if it never
  # comes up. All retry output is appended to the setup log.
  if ! retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1; then
    exit 1
  fi
}
|
||||
|
||||
verify_setup() {
  # Run the so-verify script for this install type, append its combined
  # stdout/stderr to the setup log, then show the matching completion or
  # failure dialog based on its exit status.
  info "Verifying setup"
  output=$(./so-verify "$setup_type" 2>&1)
  result=$?
  echo "$output" >> "$setup_log"
  case $result in
    0) whiptail_setup_complete ;;
    *) whiptail_setup_failed ;;
  esac
}
|
||||
@@ -585,7 +585,7 @@ if ! [[ -f $install_opt_file ]]; then
|
||||
fi
|
||||
checkin_at_boot
|
||||
set_initial_firewall_access
|
||||
whiptail_setup_complete
|
||||
verify_setup
|
||||
else
|
||||
touch /root/accept_changes
|
||||
mkdir -p /opt/so
|
||||
@@ -610,9 +610,8 @@ if ! [[ -f $install_opt_file ]]; then
|
||||
checkin_at_boot
|
||||
logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/"
|
||||
whiptail_setup_complete
|
||||
verify_setup
|
||||
fi
|
||||
|
||||
# Need to make sure the latest install is located on the web server of the manager to check the versions and donwload the code if required
|
||||
|
||||
|
||||
fi
|
||||
|
||||
98
setup/so-verify
Executable file
98
setup/so-verify
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
# Run from the directory containing this script so the relative source
# paths below resolve regardless of the caller's working directory.
cd "$(dirname "$0")" || exit 255

# Shared grid helpers plus setup-specific functions and variables
# (these define e.g. is_manager_node, $setup_log, $error_log).
source ../salt/common/tools/sbin/so-common
source ./so-functions
source ./so-variables

# First argument selects the install type (e.g. "iso"); consumed by using_iso.
setup_type=$1
|
||||
using_iso() {
  # Report whether this setup was launched from the ISO image.
  # Returns 0 only when the caller passed "iso" as the setup type.
  if [ "$setup_type" == "iso" ]; then
    return 0
  fi
  # Bug fix: this previously returned 0, making the function report "iso"
  # unconditionally (and triggering the mail-spool check on network installs).
  return 1
}
|
||||
|
||||
# Check entire setup log for errors or unexpected salt states.
log_has_errors() {
  # Scan $setup_log for failure indicators and write any surviving matches
  # to $error_log. Returns 0 (errors found) when at least one line remains
  # after filtering out known-benign messages:
  #  - salt master cached-key / minion auth-failure lines: expected, they
  #    come from testing whether the salt key was already accepted
  #  - "Failed to connect to ::1": most curls are wrapped in a retry
  #  - "perl-Error-": the name of a Perl package SO installs
  #  - "Failed: 0" salt summary counts: state failures are detected via
  #    "Result: False" instead
  grep -E "FAILED|Failed|failed|ERROR|Error|Result: False" "$setup_log" \
    | grep -vE "The Salt Master has cached the public key for this node" \
    | grep -vE "Minion failed to authenticate with the master" \
    | grep -vE "Failed to connect to ::1" \
    | grep -vE "perl-Error-" \
    | grep -vE "Failed:\s*?[0-9]+" \
    | grep -vE "Status .* was not found" \
    | grep -vE "Uncaught exception, closing connection" \
    | grep -vE "Exception in callback None" \
    | grep -vE "deprecation: ERROR" \
    | grep -vE "code: 100" \
    | grep -vE "Running scope as unit" &> "$error_log" && return 0
  return 1
}
|
||||
|
||||
# For ISO installs, we know nothing else can be running on this server, so there
# should be nothing in any mail spool dir.
cron_error_in_mail_spool() {
  # Returns 0 when any non-empty file exists in the mail spool (meaning a
  # cron job emitted unexpected output), 1 otherwise.
  # The directory is parameterized (mainly for testing) and defaults to the
  # system spool location.
  local spool_dir=${1:-/var/spool/mail/}
  # Bug fix: find exits 0 whether or not it matched anything, so testing its
  # exit status reported "errors" whenever the directory merely existed.
  # Test for non-empty output instead.
  if [[ -n $(find "$spool_dir" -type f -size +0 2>/dev/null) ]]; then
    return 0
  fi
  return 1
}
|
||||
|
||||
# so-status must eventually return a 0 exit code, indicating all containers are
# up and healthy. Will retry for a limited time before giving up.
status_failed() {
  # Returns 1 (healthy, i.e. NOT failed) as soon as `so-status -q` succeeds;
  # returns 0 (failed) after exhausting 120 retries at 10s intervals.
  local attempt=0
  local limit=120
  local pause=10
  until so-status -q; do
    if (( attempt == limit )); then
      return 0
    fi
    attempt=$((attempt + 1))
    echo "INFO: so-status returned non-zero exit code; will retry in $pause seconds ($attempt/$limit)"
    sleep "$pause"
  done
  return 1
}
|
||||
|
||||
main() {
  # Run each verification; the first failing check determines the warning
  # printed, and the script exits 1 on any failure, 0 otherwise.
  if log_has_errors; then
    echo "WARNING: Errors detected during setup"
    exit 1
  fi
  # Mail-spool check only applies to ISO installs, where nothing else
  # should be running on the box.
  if using_iso && cron_error_in_mail_spool; then
    echo "WARNING: Unexpected cron job output in mail spool"
    exit 1
  fi
  # Container health is only verifiable where so-status runs (manager roles).
  if is_manager_node && status_failed; then
    echo "WARNING: Containers are not in a healthy state"
    exit 1
  fi
  echo "Successfully completed setup!"
  exit 0
}
|
||||
|
||||
main
|
||||
Reference in New Issue
Block a user