Merge branch '2.4/dev' into kilo

Jason Ertel
2023-10-25 09:04:36 -04:00
40 changed files with 2882 additions and 354 deletions
+11 -11
@@ -1,18 +1,18 @@
-### 2.4.20-20231006 ISO image released on 2023/10/06
+### 2.4.20-20231012 ISO image released on 2023/10/12
### Download and Verify
-2.4.20-20231006 ISO image:
+2.4.20-20231012 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
+https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso
-MD5: 269F00308C53976BF0EAE788D1DB29DB
+MD5: 7D6ACA843068BA9432B3FF63BFD1EF0F
-SHA1: 3F7C2324AE1271112F3B752BA4724AF36688FC27
+SHA1: BEF2B906066A1B04921DF0B80E7FDD4BC8ECED5C
-SHA256: 542B8B3F4F75AD24DC78007F8FE0857E00DC4CC9F4870154DCB8D5D0C4144B65
+SHA256: 5D511D50F11666C69AE12435A47B9A2D30CB3CC88F8D38DC58A5BC0ECADF1BF5
Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig
```
Download the ISO image:
```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso
```
Verify the downloaded ISO image using the signature file:
```
-gpg --verify securityonion-2.4.20-20231006.iso.sig securityonion-2.4.20-20231006.iso
+gpg --verify securityonion-2.4.20-20231012.iso.sig securityonion-2.4.20-20231012.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
-gpg: Signature made Tue 03 Oct 2023 11:40:51 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 12 Oct 2023 01:28:32 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
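Beyond the GPG check shown above, the downloaded image can also be compared against the published checksums; a minimal sketch, assuming the 2.4.20-20231012 ISO is in the current directory:
```
# Compute the SHA256 of the downloaded ISO and compare it to the published value.
sha256sum securityonion-2.4.20-20231012.iso
# Expected: 5D511D50F11666C69AE12435A47B9A2D30CB3CC88F8D38DC58A5BC0ECADF1BF5
```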
+12 -8
@@ -7,19 +7,23 @@
tgt_type='compound') | dictsort()
%}
-{% set hostname = cached_grains[minionid]['host'] %}
-{% set node_type = minionid.split('_')[1] %}
-{% if node_type not in node_types.keys() %}
-{% do node_types.update({node_type: {hostname: ip[0]}}) %}
-{% else %}
-{% if hostname not in node_types[node_type] %}
-{% do node_types[node_type].update({hostname: ip[0]}) %}
-{% else %}
-{% do node_types[node_type][hostname].update(ip[0]) %}
-{% endif %}
-{% endif %}
+# only add a node to the pillar if it returned an ip from the mine
+{% if ip | length > 0%}
+{% set hostname = cached_grains[minionid]['host'] %}
+{% set node_type = minionid.split('_')[1] %}
+{% if node_type not in node_types.keys() %}
+{% do node_types.update({node_type: {hostname: ip[0]}}) %}
+{% else %}
+{% if hostname not in node_types[node_type] %}
+{% do node_types[node_type].update({hostname: ip[0]}) %}
+{% else %}
+{% do node_types[node_type][hostname].update(ip[0]) %}
+{% endif %}
+{% endif %}
+{% endif %}
{% endfor %}
logstash:
  nodes:
{% for node_type, values in node_types.items() %}
+14 -10
@@ -4,18 +4,22 @@
{% set hostname = minionid.split('_')[0] %}
{% set node_type = minionid.split('_')[1] %}
{% set is_alive = False %}
-{% if minionid in manage_alived.keys() %}
-{% if ip[0] == manage_alived[minionid] %}
-{% set is_alive = True %}
-{% endif %}
-{% endif %}
-{% if node_type not in node_types.keys() %}
-{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
-{% else %}
-{% if hostname not in node_types[node_type] %}
-{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
-{% else %}
-{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
-{% endif %}
-{% endif %}
+# only add a node to the pillar if it returned an ip from the mine
+{% if ip | length > 0%}
+{% if minionid in manage_alived.keys() %}
+{% if ip[0] == manage_alived[minionid] %}
+{% set is_alive = True %}
+{% endif %}
+{% endif %}
+{% if node_type not in node_types.keys() %}
+{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
+{% else %}
+{% if hostname not in node_types[node_type] %}
+{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
+{% else %}
+{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
+{% endif %}
+{% endif %}
+{% endif %}
{% endfor %}
+6
@@ -50,6 +50,12 @@ pki_public_ca_crt:
attempts: 5
interval: 30
+mine_update_ca_crt:
+  module.run:
+    - mine.update: []
+    - onchanges:
+      - x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
+82 -17
@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
-ELASTIC_AGENT_TARBALL_VERSION="8.8.2"
+ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
@@ -133,22 +133,37 @@ check_elastic_license() {
}
check_salt_master_status() {
-  local timeout=$1
-  echo "Checking if we can talk to the salt master"
-  salt-call state.show_top concurrent=true
-  return
+  local count=0
+  local attempts="${1:- 10}"
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Checking if we can access the salt master and that it is ready at: ${current_time}"
+  while ! salt-call state.show_top -l error concurrent=true 1> /dev/null; do
+    current_time="$(date '+%b %d %H:%M:%S')"
+    echo "Can't access salt master or it is not ready at: ${current_time}"
+    ((count+=1))
+    if [[ $count -eq $attempts ]]; then
+      # 10 attempts takes about 5.5 minutes
+      echo "Gave up trying to access salt-master"
+      return 1
+    fi
+  done
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Successfully accessed and salt master ready at: ${current_time}"
+  return 0
}
+# this is only intended to be used to check the status of the minion
check_salt_minion_status() {
-  local timeout=$1
-  echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
-  salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
+  local minion="$1"
+  local timeout="${2:-5}"
+  local logfile="${3:-'/dev/stdout'}"
+  echo "Checking if the salt minion will respond to jobs" >> "$logfile" 2>&1
+  salt "$minion" test.ping -t $timeout > /dev/null 2>&1
  local status=$?
  if [ $status -gt 0 ]; then
-    echo " Minion did not respond" >> "$setup_log" 2>&1
+    echo " Minion did not respond" >> "$logfile" 2>&1
  else
-    echo " Received job response from salt minion" >> "$setup_log" 2>&1
+    echo " Received job response from salt minion" >> "$logfile" 2>&1
  fi
  return $status
@@ -428,6 +443,24 @@ run_check_net_err() {
fi
}
+wait_for_salt_minion() {
+  local minion="$1"
+  local timeout="${2:-5}"
+  local logfile="${3:-'/dev/stdout'}"
+  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
+  local attempt=0
+  # each attempts would take about 15 seconds
+  local maxAttempts=20
+  until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
+    attempt=$((attempt+1))
+    if [[ $attempt -eq $maxAttempts ]]; then
+      return 1
+    fi
+    sleep 10
+  done
+  return 0
+}
salt_minion_count() {
local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l)
@@ -440,19 +473,51 @@ set_os() {
OS=rocky
OSVER=9
is_rocky=true
+    is_rpm=true
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
OS=centos
OSVER=9
is_centos=true
+    is_rpm=true
-  elif grep -q "Oracle Linux Server release 9" /etc/system-release; then
-    OS=oel
+  elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
+    OS=alma
OSVER=9
-    is_oracle=true
+    is_alma=true
+    is_rpm=true
+  elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
+    if [ -f /etc/oracle-release ]; then
+      OS=oracle
+      OSVER=9
+      is_oracle=true
+      is_rpm=true
+    else
+      OS=rhel
+      OSVER=9
+      is_rhel=true
+      is_rpm=true
+    fi
fi
cron_service_name="crond"
-else
-  OS=ubuntu
-  is_ubuntu=true
+elif [ -f /etc/os-release ]; then
+  if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
+    OSVER=focal
+    UBVER=20.04
+    OS=ubuntu
+    is_ubuntu=true
+    is_deb=true
+  elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
+    OSVER=jammy
+    UBVER=22.04
+    OS=ubuntu
+    is_ubuntu=true
+    is_deb=true
+  elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
+    OSVER=bookworm
+    DEBVER=12
+    is_debian=true
+    OS=debian
+    is_deb=true
+  fi
cron_service_name="cron"
fi
}
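Taken together, the reworked helpers above change their calling conventions: check_salt_master_status now takes an optional attempt count (defaulting to 10), and check_salt_minion_status/wait_for_salt_minion take a minion ID, timeout, and log file. A hedged usage sketch, not part of this commit; the minion ID and log path are illustrative:
```
# Illustrative only: poll the salt-master (up to 10 attempts), then wait for a
# specific minion to answer test.ping, logging to a hypothetical file.
check_salt_master_status 10 || exit 1
wait_for_salt_minion "sensor01_sensor" 5 /opt/so/log/example.log || echo "minion not ready"
```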
+1 -1
@@ -137,7 +137,7 @@ update_docker_containers() {
for i in "${TRUSTED_CONTAINERS[@]}" for i in "${TRUSTED_CONTAINERS[@]}"
do do
if [ -z "$PROGRESS_CALLBACK" ]; then if [ -z "$PROGRESS_CALLBACK" ]; then
echo "Downloading $i" >> "$LOG_FILE" 2>&1 echo "Downloading $i" >> "$LOG_FILE" 2>&1
else else
$PROGRESS_CALLBACK $i $PROGRESS_CALLBACK $i
fi fi
+2 -1
@@ -136,6 +136,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -230,4 +231,4 @@ else
echo -e "\nResult: One or more errors found" echo -e "\nResult: One or more errors found"
fi fi
exit $RESULT exit $RESULT
+8
@@ -6,6 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
+# include ssl since docker service requires the intca
+include:
+  - ssl
dockergroup:
group.present:
@@ -86,6 +89,11 @@ docker_running:
- enable: True
- watch:
- file: docker_daemon
+      - x509: trusttheca
+    - require:
+      - file: docker_daemon
+      - x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
# 57314 = Strelka, 47760-47860 = Zeek
+13 -1
@@ -30,18 +30,24 @@ elasticfleet:
packages:
  - apache
  - auditd
+  - auth0
  - aws
  - azure
  - barracuda
+  - carbonblack_edr
  - cisco_asa
+  - cisco_duo
+  - cisco_meraki
+  - cisco_umbrella
  - cloudflare
  - crowdstrike
  - darktrace
  - elasticsearch
  - endpoint
  - f5_bigip
-  - fleet_server
  - fim
+  - fireeye
+  - fleet_server
  - fortinet
  - fortinet_fortigate
  - gcp
@@ -57,18 +63,24 @@ elasticfleet:
  - m365_defender
  - microsoft_defender_endpoint
  - microsoft_dhcp
+  - mimecast
  - netflow
  - o365
  - okta
  - osquery_manager
  - panw
  - pfsense
+  - pulse_connect_secure
  - redis
  - sentinel_one
+  - snyk
  - sonicwall_firewall
+  - sophos
+  - sophos_central
  - symantec_endpoint
  - system
  - tcp
+  - tenable_sc
  - ti_abusech
  - ti_misp
  - ti_otx
@@ -5,7 +5,7 @@
"package": {
  "name": "endpoint",
  "title": "Elastic Defend",
-  "version": "8.8.0"
+  "version": "8.10.2"
},
"enabled": true,
"policy_id": "endpoints-initial",
+18
@@ -42,6 +42,23 @@ elastic_fleet_integration_create() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
+elastic_fleet_integration_remove() {
+  AGENT_POLICY=$1
+  NAME=$2
+  INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$AGENT_POLICY" | jq -r '.item.package_policies[] | select(.name=="'"$NAME"'") | .id')
+  JSON_STRING=$( jq -n \
+    --arg INTEGRATIONID "$INTEGRATION_ID" \
+    '{"packagePolicyIds":[$INTEGRATIONID]}'
+  )
+  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
+}
elastic_fleet_integration_update() {
UPDATE_ID=$1
@@ -98,3 +115,4 @@ elastic_fleet_policy_update() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
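The new elastic_fleet_integration_remove helper is exercised later in this commit's 2.4.20 hotfix path; a minimal usage sketch (the policy and integration names mirror that hotfix):
```
# Source the helper library, remove the Elastic Defend integration from the
# endpoints-initial agent policy, then re-create it with the new policy script.
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
```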
@@ -0,0 +1,27 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Usage: Run with --force to update the Elastic Defend integration policy
. /usr/sbin/so-elastic-fleet-common
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
if [ "$1" = "--force" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration $NAME exists - Not updating - rerun with --force to force the update.\n"
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
elastic_fleet_integration_create "@$INTEGRATION"
fi
done
+4
@@ -12,6 +12,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade
+# Second, configure Elastic Defend Integration seperately
+/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
@@ -65,3 +68,4 @@ else
exit $RETURN_CODE
fi
+522
@@ -4398,3 +4398,525 @@ elasticsearch:
min_age: 365d
actions:
delete: {}
so-logs-auth0_x_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-auth0.logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-auth0.logs@package"
- "logs-auth0.logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-carbonblack_edr_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-carbonblack_edr.log-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-carbonblack_edr.log@package"
- "logs-carbonblack_edr.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_duo_x_admin:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_duo.admin-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_duo.admin@package"
- "logs-cisco_duo.admin@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_duo_x_auth:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_duo.auth-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_duo.auth@package"
- "logs-cisco_duo.auth@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_duo_x_offline_enrollment:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_duo.offline_enrollment-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_duo.offline_enrollment@package"
- "logs-cisco_duo.offline_enrollment@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_duo_x_summary:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_duo.summary-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_duo.summary@package"
- "logs-cisco_duo.summary@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_duo_x_telephony:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_duo.telephony-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_duo.telephony@package"
- "logs-cisco_duo.telephony@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_meraki_x_events:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_meraki.events-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_meraki.events@package"
- "logs-cisco_meraki.events@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_meraki_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_meraki.log-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_meraki.log@package"
- "logs-cisco_meraki.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-cisco_umbrella_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-cisco_umbrella.log-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-cisco_umbrella.log@package"
- "logs-cisco_umbrella.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-fireeye_x_nx:
index_sorting: False
index_template:
index_patterns:
- "logs-fireeye.nx-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-fireeye.nx@package"
- "logs-fireeye.nx@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_audit_events:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.audit_events-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.audit_events@package"
- "logs-mimecast.audit_events@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_dlp_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.dlp_logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.dlp_logs@package"
- "logs-mimecast.dlp_logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_siem_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.siem_logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.siem_logs@package"
- "logs-mimecast.siem_logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_threat_intel_malware_customer:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.threat_intel_malware_customer-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.threat_intel_malware_customer@package"
- "logs-mimecast.threat_intel_malware_customer@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_threat_intel_malware_grid:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.threat_intel_malware_grid-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.threat_intel_malware_grid@package"
- "logs-mimecast.threat_intel_malware_grid@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_ttp_ap_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.ttp_ap_logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.ttp_ap_logs@package"
- "logs-mimecast.ttp_ap_logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_ttp_ip_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.ttp_ip_logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.ttp_ip_logs@package"
- "logs-mimecast.ttp_ip_logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-mimecast_x_ttp_url_logs:
index_sorting: False
index_template:
index_patterns:
- "logs-mimecast.ttp_url_logs-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-mimecast.ttp_url_logs@package"
- "logs-mimecast.ttp_url_logs@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-pulse_connect_secure_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-pulse_connect_secure.log-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-pulse_connect_secure.log@package"
- "logs-pulse_connect_secure.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-snyk_x_audit:
index_sorting: False
index_template:
index_patterns:
- "logs-snyk.audit-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-snyk.audit@package"
- "logs-snyk.audit@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-snyk_x_vulnerabilities:
index_sorting: False
index_template:
index_patterns:
- "logs-snyk.vulnerabilities-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-snyk.vulnerabilities@package"
- "logs-snyk.vulnerabilities@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-sophos_x_utm:
index_sorting: False
index_template:
index_patterns:
- "logs-sophos.utm-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-sophos.utm@package"
- "logs-sophos.utm@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-sophos_x_xg:
index_sorting: False
index_template:
index_patterns:
- "logs-sophos.xg-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-sophos.xg@package"
- "logs-sophos.xg@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-sophos_central_x_alert:
index_sorting: False
index_template:
index_patterns:
- "logs-sophos_central.alert-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-sophos_central.alert@package"
- "logs-sophos_central.alert@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-sophos_central_x_event:
index_sorting: False
index_template:
index_patterns:
- "logs-sophos_central.event-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-sophos_central.event@package"
- "logs-sophos_central.event@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-tenable_sc_x_asset:
index_sorting: False
index_template:
index_patterns:
- "logs-tenable_sc.asset-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-tenable_sc.asset@package"
- "logs-tenable_sc.asset@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-tenable_sc_x_plugin:
index_sorting: False
index_template:
index_patterns:
- "logs-tenable_sc.plugin-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-tenable_sc.plugin@package"
- "logs-tenable_sc.plugin@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
so-logs-tenable_sc_x_vulnerability:
index_sorting: False
index_template:
index_patterns:
- "logs-tenable_sc.vulnerability-*"
template:
settings:
index:
number_of_replicas: 0
composed_of:
- "logs-tenable_sc.vulnerability@package"
- "logs-tenable_sc.vulnerability@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
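Once these pillar defaults are rendered and applied, each entry should surface as a composable index template in Elasticsearch. A hedged spot-check from the manager; the so-elasticsearch-query wrapper and the wildcard template name are assumptions, not part of this commit:
```
# Illustrative only: list index templates matching the new cisco_duo data
# streams and show which component templates they are composed of.
so-elasticsearch-query "_index_template/*cisco_duo*" | jq '.index_templates[].index_template.composed_of'
```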
@@ -2,6 +2,7 @@
"description" : "suricata.common", "description" : "suricata.common",
"processors" : [ "processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } }, { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } }, { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } }, { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} {"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
-RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
+RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done
+35 -8
@@ -406,12 +406,17 @@ function update_logstash_outputs() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
+function checkMine() {
+  local func=$1
+  # make sure the minion sees itself in the mine since it needs to see itself for states as opposed to using salt-run
+  retry 20 1 "salt '$MINION_ID' mine.get '\*' '$func'" "$MINION_ID"
+}
function updateMine() {
-  salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
+  retry 20 1 "salt '$MINION_ID' mine.update" True
}
-function apply_ES_state() {
-  salt-call state.apply elasticsearch concurrent=True
-}
function createEVAL() {
is_pcaplimit=true
add_elasticsearch_to_minion
@@ -547,8 +552,6 @@ function createSEARCHNODE() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_telegraf_to_minion
-updateMine
-apply_ES_state
}
function createRECEIVER() {
@@ -563,6 +566,19 @@ function createDESKTOP() {
}
function testConnection() {
+  # the minion should be trying to auth every 10 seconds so 15 seconds should be more than enough time to see this in the log
+  # this retry was put in because it is possible that a minion is attempted to be pinged before it has authenticated and connected to the Salt master
+  # causing the first ping to fail and typically wouldn't be successful until the second ping
+  # this check may pass without the minion being authenticated if it was previously connected and the line exists in the log
+  retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master"
+  local retauth=$?
+  if [[ $retauth != 0 ]]; then
+    echo "The Minion did not authenticate with the Salt master in the allotted time"
+    echo "Deleting the key"
+    deleteminion
+    exit 1
+  fi
retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
@@ -582,9 +598,9 @@ if [[ "$OPERATION" = 'delete' ]]; then
deleteminion
fi
-if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
+if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# Skip this if its setup
-if [ $OPERATION != 'setup' ]; then
+if [[ $OPERATION == 'add' ]]; then
# Accept the salt key
acceptminion
# Test to see if the minion was accepted
@@ -605,8 +621,19 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
else
add_sensoroni_to_minion
fi
create$NODETYPE
echo "Minion file created for $MINION_ID"
+if [[ "$OPERATION" == 'add' ]]; then
+  # tell the minion to populate the mine with data from mine_functions which is populated during setup
+  # this only needs to happen on non managers since they handle this during setup
+  # and they need to wait for ca creation to update the mine
+  updateMine
+  checkMine "network.ip_addrs"
+  # run this async so the cli doesn't wait for a return
+  salt "$MINION_ID" state.highstate --async
+fi
fi
if [[ "$OPERATION" = 'test' ]]; then
+61 -28
@@ -460,7 +460,6 @@ stop_salt_master() {
echo "" echo ""
echo "Killing any queued Salt jobs on the manager." echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1 pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo "" echo ""
echo "Storing salt-master pid." echo "Storing salt-master pid."
@@ -468,6 +467,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID" echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master" systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option." timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
} }
stop_salt_minion() { stop_salt_minion() {
@@ -480,14 +480,12 @@ stop_salt_minion() {
echo "" echo ""
echo "Killing Salt jobs on this node." echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local salt-call saltutil.kill_all_jobs --local
set -e
echo "Storing salt-minion pid." echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1) MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID" echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion" systemctl_func "stop" "salt-minion"
set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e set -e
} }
@@ -578,7 +576,7 @@ update_centos_repo() {
}
update_salt_mine() {
-echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+echo "Populating the mine with mine_functions for each host."
set +e
salt \* mine.update -b 50
set -e
@@ -620,6 +618,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
+echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
@@ -628,22 +627,48 @@ upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
-# If CentOS
-if [[ $OS == 'centos' ]]; then
+# If rhel family
+if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages."
echo ""
set +e
-run_check_net_err \
-"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
-"Could not update salt, please check $SOUP_LOG for details."
+# if oracle run with -r to ignore repos set by bootstrap
+if [[ $OS == 'oracle' ]]; then
+  run_check_net_err \
+  "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
+  "Could not update salt, please check $SOUP_LOG for details."
+# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
+else
+  run_check_net_err \
+  "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
+  "Could not update salt, please check $SOUP_LOG for details."
+fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
+elif [[ $is_deb ]]; then
+  echo "Removing apt hold for Salt."
+  echo ""
+  apt-mark unhold "salt-common"
+  apt-mark unhold "salt-master"
+  apt-mark unhold "salt-minion"
+  echo "Updating Salt packages."
+  echo ""
+  set +e
+  run_check_net_err \
+  "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
+  "Could not update salt, please check $SOUP_LOG for details."
+  set -e
+  echo "Applying apt hold for Salt."
+  echo ""
+  apt-mark hold "salt-common"
+  apt-mark hold "salt-master"
+  apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -655,7 +680,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again." echo "Once the issue is resolved, run soup again."
echo "Exiting." echo "Exiting."
echo "" echo ""
exit 0 exit 1
else else
echo "Salt upgrade success." echo "Salt upgrade success."
echo "" echo ""
@@ -691,13 +716,16 @@ verify_latest_update_script() {
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
-# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
-#   fix_wazuh
+if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
+  salt-call state.apply elasticfleet -l info queue=True
+  . /usr/sbin/so-elastic-fleet-common
+  elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
+  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
#   2_3_10_hotfix_1
-# else
+else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
-# fi
+fi
}
@@ -733,14 +761,8 @@ main() {
echo "" echo ""
set_os set_os
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master." check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager." echo "Checking to see if this is a manager."
echo "" echo ""
@@ -788,7 +810,7 @@ main() {
if [[ $is_airgap -eq 0 ]]; then
yum clean all
check_os_updates
-elif [[ $OS == 'oel' ]]; then
+elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
check_os_updates
@@ -805,7 +827,8 @@ main() {
echo "Hotfix applied" echo "Hotfix applied"
update_version update_version
enable_highstate enable_highstate
salt-call state.highstate -l info queue=True (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
else else
echo "" echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -826,7 +849,7 @@ main() {
else
update_registry
set +e
-update_docker_containers "soup" "" "" "$SOUP_LOG"
+update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e
fi
@@ -841,6 +864,14 @@ main() {
echo "Upgrading Salt" echo "Upgrading Salt"
# Update the repo files so it can actually upgrade # Update the repo files so it can actually upgrade
upgrade_salt upgrade_salt
# for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi fi
preupgrade_changes preupgrade_changes
@@ -878,7 +909,7 @@ main() {
# Testing that salt-master is up by checking that is it connected to itself
set +e
echo "Waiting on the Salt Master service to be ready."
-salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
# update the salt-minion configs here and start the minion
@@ -903,7 +934,8 @@ main() {
echo "" echo ""
echo "Running a highstate. This could take several minutes." echo "Running a highstate. This could take several minutes."
set +e set +e
salt-call state.highstate -l info queue=True (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e set -e
stop_salt_master stop_salt_master
@@ -914,11 +946,12 @@ main() {
set +e
echo "Waiting on the Salt Master service to be ready."
-salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
-salt-call state.highstate -l info queue=True
+(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+highstate
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update
+1 -1
@@ -41,7 +41,7 @@ pcap_sbin:
- file_mode: 755
{% if PCAPBPF %}
-{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
+{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %}
{% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %}
{% else %}
+90
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
import logging
from time import sleep
import os

import salt.client

log = logging.getLogger(__name__)

local = salt.client.LocalClient()

def start(interval=60):
    def mine_delete(minion, func):
        log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion))
        local.cmd(minion, 'mine.delete', [func])

    def mine_flush(minion):
        log.warning('checkmine engine: flushing mine cache for %s' % minion)
        local.cmd(minion, 'mine.flush')

    def mine_update(minion):
        log.warning('checkmine engine: updating mine cache for %s' % minion)
        local.cmd(minion, 'mine.update')

    log.info("checkmine engine: started")
    cachedir = __opts__['cachedir']
    while True:
        log.debug('checkmine engine: checking which minions are alive')
        manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False)
        log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived))
        for minion in manage_alived:
            mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p')
            # it is possible that a minion is alive, but hasn't created a mine file yet
            try:
                mine_size = os.path.getsize(mine_path)
                log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size))
                # For some reason the mine file can be corrupt and only be 1 byte in size
                if mine_size == 1:
                    log.error('checkmine engine: found %s to be 1 byte' % mine_path)
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            except FileNotFoundError:
                log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path))
                mine_flush(minion)
                mine_update(minion)
                continue
            # if a manager check that the ca in in the mine and it is correct
            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
                try:
                    ca_crt = x509[minion]['/etc/pki/ca.crt']
                    log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
                    # since the cert is defined, make sure it is valid
                    import salt.modules.x509_v2 as x509_v2
                    if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
                        log.error('checkmine engine: found minion %s does\'t have a valid ca_crt in the mine' % (minion))
                        log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
                        mine_delete(minion, 'x509.get_pem_entries')
                        mine_update(minion)
                        continue
                    else:
                        log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
                except IndexError:
                    log.error('checkmine engine: found minion %s does\'t have a ca_crt in the mine' % (minion))
                    mine_delete(minion, 'x509.get_pem_entries')
                    mine_update(minion)
                    continue
                except KeyError:
                    log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            # Update the mine if the ip in the mine doesn't match returned from manage.alived
            network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
            try:
                mine_ip = network_ip_addrs[minion][0]
                log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip))
            except IndexError:
                log.error('checkmine engine: found minion %s does\'t have a mine_ip' % (minion))
                mine_delete(minion, 'network.ip_addrs')
                mine_update(minion)
            except KeyError:
                log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                mine_flush(minion)
                mine_update(minion)
        sleep(interval)
+6
@@ -0,0 +1,6 @@
engines_dirs:
  - /etc/salt/engines
engines:
  - checkmine:
      interval: 60
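Since the checkmine engine now runs under the salt-master (per the state changes later in this commit), a quick way to confirm it loaded is to look for its startup message in the master log; a hedged sketch using the log path referenced elsewhere in this commit:
```
# Illustrative only: the engine logs "checkmine engine: started" at info level when it loads.
grep "checkmine engine: started" /opt/so/log/salt/master
```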
+1 -1
@@ -23,7 +23,7 @@
{% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os_family|lower == 'debian' %}
-{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
+{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}
+1 -1
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  master:
-    version: 3006.1
+    version: 3006.3
+24 -12
@@ -12,22 +12,34 @@ hold_salt_master_package:
- name: salt-master
{% endif %}
# prior to 2.4.30 this engine ran on the manager with salt-minion
# this has changed to running with the salt-master in 2.4.30
remove_engines_config:
file.absent:
- name: /etc/salt/minion.d/engines.conf
- source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service
checkmine_engine:
file.managed:
- name: /etc/salt/engines/checkmine.py
- source: salt://salt/engines/master/checkmine.py
- makedirs: True
engines_config:
file.managed:
- name: /etc/salt/master.d/engines.conf
- source: salt://salt/files/engines.conf
salt_master_service:
  service.running:
    - name: salt-master
    - enable: True
+    - watch:
+      - file: checkmine_engine
+      - file: engines_config
+    - order: last
-checkmine_engine:
-  file.absent:
-    - name: /etc/salt/engines/checkmine.py
-    - watch_in:
-      - service: salt_minion_service
-engines_config:
-  file.absent:
-    - name: /etc/salt/minion.d/engines.conf
-    - watch_in:
-      - service: salt_minion_service
{% else %}
+1 -1
@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  minion:
-    version: 3006.1
+    version: 3006.3
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.
+3 -1
@@ -67,6 +67,9 @@ set_log_levels:
- "log_level: info" - "log_level: info"
- "log_level_logfile: info" - "log_level_logfile: info"
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a sleep timer on service start
# it is not required to restart the service
salt_minion_service_unit_file: salt_minion_service_unit_file:
file.managed: file.managed:
- name: {{ SYSTEMD_UNIT_FILE }} - name: {{ SYSTEMD_UNIT_FILE }}
@@ -89,6 +92,5 @@ salt_minion_service:
- file: mine_functions - file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels - file: set_log_levels
- file: salt_minion_service_unit_file
{% endif %} {% endif %}
- order: last - order: last
File diff suppressed because it is too large
@@ -0,0 +1,10 @@
# Malware Hash Registry
## Description
Search Team Cymru's Malware Hash Registry for a file hash.
## Configuration Requirements
None.
**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1.
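To confirm the DNS path the analyzer depends on, the registry can be queried directly; a hedged example, not part of this commit (the Team Cymru DNS zone is an assumption, and the MD5 shown is the well-known EICAR test hash):
```
# Illustrative only: look up a sample hash against the Malware Hash Registry over DNS,
# forcing the 1.1.1.1 resolver suggested in the note above.
dig +short 44d88612fea8a8f36de82e1278abb02f.malware.hash.cymru.com TXT @1.1.1.1
```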
+1 -1
@@ -3,5 +3,5 @@ post_setup_cron:
    - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
    - identifier: post_setup_cron
    - user: root
-    - minute: '*/1'
+    - minute: '*/5'
    - identifier: post_setup_cron
+6 -4
@@ -13,11 +13,13 @@
{% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %}
{% endfor %}
-{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #}
+{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
{% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
-{% for m in minions.keys() %}
-{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
-{% endfor %}
+{% if node_type in ['heavynode'] %}
+{% for m in minions.keys() %}
+{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
+{% endfor %}
+{% endif %}
{% endfor %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}
+2 -2
@@ -67,10 +67,10 @@ function manage_minion() {
response=$(so-minion "-o=$op" "-m=$minion_id")
exit_code=$?
if [[ exit_code -eq 0 ]]; then
-  log "Successful command execution"
+  log "Successful '$op' command execution on $minion_id"
  respond "$id" "true"
else
-  log "Unsuccessful command execution: $response ($exit_code)"
+  log "Unsuccessful '$op' command execution on $minion_id: $response ($exit_code)"
  respond "$id" "false"
fi
}
+2
View File
@@ -52,6 +52,8 @@ so-soctopus:
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch:
- file: /opt/so/conf/soctopus/SOCtopus.conf
- require: - require:
- file: soctopusconf - file: soctopusconf
- file: navigatordefaultlayer - file: navigatordefaultlayer
+1 -1
View File
@@ -129,7 +129,7 @@ surithresholding:
# BPF compilation and configuration # BPF compilation and configuration
{% if SURICATABPF %} {% if SURICATABPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %} {% set BPF_STATUS = 1 %}
{% else %} {% else %}
+1 -1
View File
@@ -280,7 +280,7 @@ suricata:
mqtt: mqtt:
enabled: 'no' enabled: 'no'
http2: http2:
enabled: 'no' enabled: 'yes'
asn1-max-frames: 256 asn1-max-frames: 256
run-as: run-as:
user: suricata user: suricata
+1 -1
View File
@@ -152,7 +152,7 @@ plcronscript:
# BPF compilation and configuration # BPF compilation and configuration
{% if ZEEKBPF %} {% if ZEEKBPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %} {% set BPF_STATUS = 1 %}
{% else %} {% else %}
+4 -3
View File
@@ -49,12 +49,13 @@ zeek:
- frameworks/files/hash-all-files - frameworks/files/hash-all-files
- frameworks/files/detect-MHR - frameworks/files/detect-MHR
- policy/frameworks/notice/extend-email/hostnames - policy/frameworks/notice/extend-email/hostnames
- policy/frameworks/notice/community-id
- policy/protocols/conn/community-id-logging
- ja3 - ja3
- hassh - hassh
- intel - intel
- cve-2020-0601 - cve-2020-0601
- securityonion/bpfconf - securityonion/bpfconf
- securityonion/communityid
- securityonion/file-extraction - securityonion/file-extraction
- oui-logging - oui-logging
- icsnpp-modbus - icsnpp-modbus
@@ -75,7 +76,7 @@ zeek:
- LogAscii::use_json = T; - LogAscii::use_json = T;
- CaptureLoss::watch_interval = 5 mins; - CaptureLoss::watch_interval = 5 mins;
networks: networks:
HOME_NET: HOME_NET:
- 192.168.0.0/16 - 192.168.0.0/16
- 10.0.0.0/8 - 10.0.0.0/8
- 172.16.0.0/12 - 172.16.0.0/12
@@ -120,4 +121,4 @@ zeek:
- stats - stats
- stderr - stderr
- stdout - stdout
+13 -7
View File
@@ -1258,7 +1258,7 @@ generate_ssl() {
# if the install type is a manager then we need to wait for the minion to be ready before trying # if the install type is a manager then we need to wait for the minion to be ready before trying
# to run the ssl state since we need the minion to sign the certs # to run the ssl state since we need the minion to sign the certs
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion wait_for_salt_minion "$MINION_ID" "5" "$setup_log" || fail_setup
fi fi
info "Applying SSL state" info "Applying SSL state"
logCmd "salt-call state.apply ssl -l info" logCmd "salt-call state.apply ssl -l info"
@@ -1972,6 +1972,7 @@ securityonion_repo() {
} }
repo_sync_local() { repo_sync_local() {
SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
info "Repo Sync" info "Repo Sync"
if [[ $is_supported ]]; then if [[ $is_supported ]]; then
# Sync the repo from the SO repo locally. # Sync the repo from the SO repo locally.
@@ -2021,7 +2022,7 @@ repo_sync_local() {
curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo
rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo
dnf repolist dnf repolist
curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install
else else
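The SALTVERSION line added above is a simple egrep/sed pipeline over master.defaults.yaml, and the extracted value is substituted into the salt.repo URL a few lines later. A quick illustration of the pipeline's behavior, using an assumed sample line rather than the real file:

```
# Illustration only: the sample input line is assumed, not copied from the repo.
echo "  version: 3006.3" | egrep 'version: [0-9]{4}' | sed 's/^.*version: //'
# prints: 3006.3
# which would make the repo URL end in .../minor/3006.3.repo
```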
@@ -2111,11 +2112,6 @@ saltify() {
} }
# Run a salt command to generate the minion key
salt_firstcheckin() {
salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
}
salt_install_module_deps() { salt_install_module_deps() {
logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
@@ -2500,6 +2496,16 @@ wait_for_file() {
wait_for_salt_minion() { wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
local attempt=0
# each attempt takes about 15 seconds
local maxAttempts=20
until check_salt_minion_status; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
fail_setup
fi
sleep 10
done
} }
verify_setup() { verify_setup() {
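The retry loop added above depends on check_salt_minion_status, whose definition is not part of this diff. Purely as a hedged sketch of what such a check might look like (the real helper lives elsewhere in so-functions and may differ):

```
# Hypothetical sketch only; not the actual check_salt_minion_status implementation.
check_salt_minion_status() {
  # A bounded ping through the master; a failure here means the minion is not
  # yet ready, so the calling loop sleeps and tries again.
  timeout 15 salt-call test.ping >> "$setup_log" 2>&1
}
```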
+12 -5
View File
@@ -91,7 +91,7 @@ fi
# if packages are updated and the box isn't rebooted # if packages are updated and the box isn't rebooted
if [[ $is_debian ]]; then if [[ $is_debian ]]; then
update_packages update_packages
if [[ -f "/var/run/reboot-required" ]]; then if [[ -f "/var/run/reboot-required" ]] && [ -z "$test_profile" ]; then
whiptail_debian_reboot_required whiptail_debian_reboot_required
reboot reboot
fi fi
@@ -714,6 +714,17 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common.packages"
logCmd "salt-call state.apply common" logCmd "salt-call state.apply common"
# this will apply the salt.minion state first since salt.master includes salt.minion
logCmd "salt-call state.apply salt.master"
# wait here until we get a response from the salt-master since it may have just restarted
# exit setup after 5-6 minutes of trying
check_salt_master_status || fail "Can't access salt master or it is not ready"
# apply the ca state to create the ca and put it in the mine early in the install
# the minion ip will already be in the mine from the configure_minion function in so-functions
generate_ca
# this will also call the ssl state since docker requires the intca
# the salt-minion service will need to be up on the manager to sign requests
generate_ssl
logCmd "salt-call state.apply docker" logCmd "salt-call state.apply docker"
firewall_generate_templates firewall_generate_templates
set_initial_firewall_policy set_initial_firewall_policy
@@ -721,8 +732,6 @@ if ! [[ -f $install_opt_file ]]; then
title "Downloading Elastic Agent Artifacts" title "Downloading Elastic Agent Artifacts"
download_elastic_agent_artifacts download_elastic_agent_artifacts
generate_ca
generate_ssl
logCmd "salt-call state.apply -l info firewall" logCmd "salt-call state.apply -l info firewall"
# create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
@@ -768,8 +777,6 @@ if ! [[ -f $install_opt_file ]]; then
checkin_at_boot checkin_at_boot
set_initial_firewall_access set_initial_firewall_access
logCmd "salt-call schedule.enable -linfo --local" logCmd "salt-call schedule.enable -linfo --local"
systemctl restart salt-master
systemctl restart salt-minion
verify_setup verify_setup
else else
touch /root/accept_changes touch /root/accept_changes
Binary file not shown.