Merge pull request #15098 from Security-Onion-Solutions/byoh

Byoh
Josh Patterson
2025-10-01 15:06:01 -04:00
committed by GitHub
11 changed files with 51 additions and 25 deletions

View File

@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
status "- assigning unique identifier to import: $HASH" status "- assigning unique identifier to import: $HASH"
pcap_data=$(pcapinfo "${PCAP}") pcap_data=$(pcapinfo "${PCAP}")
if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
status "- this PCAP file is invalid; skipping" status "- this PCAP file is invalid; skipping"
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1)) INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
else else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
HASHES="${HASHES} ${HASH}" HASHES="${HASHES} ${HASH}"
fi fi
START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}') START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}') END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
status "- found PCAP data spanning dates $START through $END" status "- found PCAP data spanning dates $START through $END"
# compare $START to $START_OLDEST # compare $START to $START_OLDEST
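Context for this change: recent capinfos releases renamed the summary fields from "First/Last packet time" to "Earliest/Latest packet time", which is why the grep patterns were updated. A minimal sketch that tolerates both field names, assuming capinfos is the tool behind the pcapinfo wrapper used in this script:

# Hedged sketch: extract the capture time range while accepting old and new capinfos field names.
PCAP="example.pcap"   # illustrative path
START=$(capinfos -a "$PCAP" | grep -E "First packet time:|Earliest packet time:" | awk '{print $4}')
END=$(capinfos -e "$PCAP" | grep -E "Last packet time:|Latest packet time:" | awk '{print $4}')
echo "PCAP spans $START through $END"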

View File

@@ -27,7 +27,7 @@ fleet_api() {
  local QUERYPATH=$1
  shift
- curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --fail 2>/dev/null
+ curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
elastic_fleet_integration_check() {
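The only change here is the added --retry-delay 10, which spaces curl's three retries ten seconds apart instead of relying on curl's default backoff. A hedged standalone sketch of the same retry behavior, with an illustrative Fleet endpoint and the curl.config auth omitted:

# Hedged sketch: retry a Fleet API call up to 3 times, waiting 10 seconds between attempts.
curl -s -L "localhost:5601/api/fleet/agent_policies" --retry 3 --retry-delay 10 --fail \
  || echo "Fleet API unreachable after retries" >&2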

View File

@@ -8,6 +8,7 @@
. /usr/sbin/so-elastic-fleet-common
+ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
@@ -17,13 +18,18 @@ do
    printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
    if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
      echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
-     exit 1
+     ERROR=true
+     continue
    fi
  else
    printf "\n\nIntegration does not exist - Creating integration\n"
    if ! elastic_fleet_integration_create "@$INTEGRATION"; then
      echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-     exit 1
+     ERROR=true
+     continue
    fi
  fi
done
+if [[ "$ERROR" == "true" ]]; then
+  exit 1
+fi
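The pattern introduced above swaps a hard exit 1 for accumulate-and-continue handling: every integration is attempted, failures are recorded, and the script still returns non-zero at the end so Salt can retry it. A minimal sketch of the pattern, where process_item is a placeholder for the real elastic_fleet_* helpers:

# Hedged sketch of accumulate-and-continue error handling; process_item is a placeholder.
ERROR=false
for ITEM in "$@"; do
  if ! process_item "$ITEM"; then
    echo "failed: $ITEM" >&2
    ERROR=true
    continue
  fi
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi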

View File

@@ -17,7 +17,6 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
  # Third, configure Elastic Defend Integration seperately
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
-
  # Initial Endpoints
  for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
  do
@@ -27,13 +26,15 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    fi
  done
@@ -47,13 +48,15 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    fi
  done
@@ -70,14 +73,16 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if [ "$NAME" != "elasticsearch-logs" ]; then
        if ! elastic_fleet_integration_create "@$INTEGRATION"; then
          echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-         exit 1
+         RETURN_CODE=1
+         continue
        fi
      fi
    fi
@@ -97,14 +102,16 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-       exit 1
+       RETURN_CODE=1
+       continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if [ "$NAME" != "elasticsearch-logs" ]; then
        if ! elastic_fleet_integration_create "@$INTEGRATION"; then
          echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-         exit 1
+         RETURN_CODE=1
+         continue
        fi
      fi
    fi
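These hunks apply the same accumulate-and-continue change to each integration loop, but track failures in RETURN_CODE rather than a boolean; the script presumably exits with that code after the last loop, outside the hunks shown. A condensed sketch of the pattern, with process_integration standing in for the update/create helpers:

# Hedged sketch: record failures per integration, keep looping, surface the result at the end.
RETURN_CODE=0
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json; do
  if ! process_integration "$INTEGRATION"; then
    echo "failed: ${INTEGRATION##*/}" >&2
    RETURN_CODE=1
    continue
  fi
done
exit "$RETURN_CODE"   # assumed final step; not visible in the hunks above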

View File

@@ -24,6 +24,7 @@ fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
+ERROR=false
for AGENT_POLICY in $agent_policies; do
  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # this script upgrades default integration packages, exit 1 and let salt handle retrying
@@ -73,11 +74,13 @@ for AGENT_POLICY in $agent_policies; do
        echo "No errors detected. Proceeding with upgrade..."
        if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
          echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
-         exit 1
+         ERROR=true
+         continue
        fi
      else
        echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
-       exit 1
+       ERROR=true
+       continue
      fi
    fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -86,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
  fi
done
done
+if [[ "$ERROR" == "true" ]]; then
+  exit 1
+fi
echo
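The loop above also dry-runs each package upgrade before applying it; with this change, a failed dry run or a failed upgrade marks ERROR and moves on to the next package instead of aborting the whole run. A hedged sketch of that flow, with dry_run_ok and upgrade_package as placeholders for the real Fleet API helpers:

# Hedged sketch: dry-run each package first, upgrade only on success, never stop mid-loop.
ERROR=false
for PACKAGE_NAME in "${default_packages[@]}"; do
  if dry_run_ok "$PACKAGE_NAME"; then
    if ! upgrade_package "$PACKAGE_NAME"; then
      ERROR=true
      continue
    fi
  else
    echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..." >&2
    ERROR=true
    continue
  fi
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi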

View File

@@ -13,6 +13,7 @@
{# Import defaults.yaml for model hardware capabilities #}
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
+{% set HYPERVISORMERGED = salt['pillar.get']('hypervisor', default=DEFAULTS.hypervisor, merge=True) %}
{# Get hypervisor nodes from pillar #}
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
@@ -30,9 +31,10 @@
{% set model = '' %}
{% if grains %}
{% set minion_id = grains.keys() | first %}
-{% set model = grains[minion_id].get('sosmodel', '') %}
+{% set model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', '')) %}
{% endif %}
-{% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
+{% set model_config = HYPERVISORMERGED.model.get(model, {}) %}
{# Get VM list from VMs file #}
{% set vms = {} %}
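Two things change in this template: model settings now come from a pillar-merged view of the hypervisor defaults (HYPERVISORMERGED), and the model lookup falls back from the sosmodel grain to byodmodel, which presumably carries the model on bring-your-own hardware (the "byoh" feature in this PR). A quick hedged check of which grain a node actually reports, run on the hypervisor itself:

# Hedged sketch: see whether this minion identifies by sosmodel or byodmodel.
MODEL=$(salt-call --local --out=newline_values_only grains.get sosmodel)
if [ -z "$MODEL" ]; then
  MODEL=$(salt-call --local --out=newline_values_only grains.get byodmodel)
fi
echo "model: ${MODEL:-<none>}"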

View File

@@ -169,6 +169,8 @@ airgap_update_dockers() {
      tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
      echo "Add Registry back"
      docker load -i "$AGDOCKER/registry_image.tar"
+     echo "Restart registry container"
+     salt-call state.apply registry queue=True
    fi
  fi
}
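After re-loading the registry image during an airgap update, the registry state is now re-applied so the container comes back up with the restored image. A hedged way to confirm the container is running afterwards; the "registry" name filter is an assumption about the container's name:

# Hedged sketch: re-apply the registry state, then check that a registry container is up.
salt-call state.apply registry queue=True
docker ps --filter "name=registry" --format '{{.Names}}: {{.Status}}'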

View File

@@ -271,7 +271,7 @@ def parse_hardware_indices(hw_value: Any) -> List[int]:
    return indices

def get_hypervisor_model(hypervisor: str) -> str:
-   """Get sosmodel from hypervisor grains."""
+   """Get sosmodel or byodmodel from hypervisor grains."""
    try:
        # Get cached grains using Salt runner
        grains = runner.cmd(
@@ -283,9 +283,9 @@ def get_hypervisor_model(hypervisor: str) -> str:
        # Get the first minion ID that matches our hypervisor
        minion_id = next(iter(grains.keys()))
-       model = grains[minion_id].get('sosmodel')
+       model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', ''))
        if not model:
-           raise ValueError(f"No sosmodel grain found for hypervisor {hypervisor}")
+           raise ValueError(f"No sosmodel or byodmodel grain found for hypervisor {hypervisor}")
        log.debug("Found model %s for hypervisor %s", model, hypervisor)
        return model
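This runner-side change mirrors the Jinja change above: the model lookup now accepts either grain. From the manager, the same grains can be inspected with the salt CLI; replace the target below with the actual hypervisor minion ID:

# Hedged sketch: inspect both model grains for a hypervisor minion from the manager.
salt 'hypervisor-minion-id*' grains.item sosmodel byodmodel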

View File

@@ -1493,6 +1493,7 @@ soc:
        folder: securityonion-normalized
      assistant:
        apiUrl: https://onionai.securityonion.net
+       healthTimeoutSeconds: 3
      salt:
        queueDir: /opt/sensoroni/queue
        timeoutMs: 45000
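The new healthTimeoutSeconds default caps how long SOC waits on the assistant health check (3 seconds). A hedged illustration of what a bounded health probe looks like from the shell; the /health path is an assumption, not something this diff confirms:

# Hedged sketch: probe the assistant endpoint with a 3-second ceiling.
if curl -sS --max-time 3 "https://onionai.securityonion.net/health" >/dev/null; then
  echo "assistant reachable"
else
  echo "assistant health check failed or timed out"
fi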

View File

@@ -585,6 +585,10 @@ soc:
          description: The URL of the AI gateway.
          advanced: True
          global: True
+       healthTimeoutSeconds:
+         description: Timeout in seconds for the Onion AI health check.
+         global: True
+         advanced: True
      client:
        assistant:
          enabled:
@@ -615,6 +619,7 @@ soc:
            advanced: True
          lowBalanceColorAlert:
            description: Onion AI credit amount at which balance turns red.
+           global: True
            advanced: True
          apiTimeoutMs:
            description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.

View File

@@ -1202,9 +1202,6 @@ hypervisor_local_states() {
logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True" logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
info "Setting up bridge for $MNIC" info "Setting up bridge for $MNIC"
salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
#if [ $is_managerhype ]; then
# logCmd "salt-call state.apply salt.minion queue=True"
#fi
fi fi
} }