Compare commits

...

17 Commits

Author SHA1 Message Date
Mike Reeves
44594ba726 Update defaults.yaml 2025-11-10 14:24:27 -05:00
Jorge Reyes
5b371c220c Merge pull request #15207 from Security-Onion-Solutions/reyesj2/forwardnode-sensor 2025-11-10 08:46:12 -06:00
reyesj2
a84df14137 rename forward node -> sensor node 2025-11-06 15:23:55 -06:00
Jorge Reyes
202b03b32b Merge pull request #15201 from Security-Onion-Solutions/reyesj2-patch-5
update so-elasticsearch-retention-estimate
2025-11-06 08:18:38 -06:00
reyesj2
1aa871ec94 small fixes 2025-11-05 17:55:57 -06:00
Jorge Reyes
f859fe6517 Merge pull request #15192 from Security-Onion-Solutions/securityonion-strelka
strelka use single master image
2025-11-05 08:07:01 -06:00
Jason Ertel
021b425b8b Merge pull request #15198 from Security-Onion-Solutions/jertel/wip
ensure previous setup outcomes are cleared
2025-11-04 16:10:53 -05:00
Jason Ertel
d95122ca01 ensure previous setup outcomes are cleared 2025-11-04 16:02:39 -05:00
Josh Patterson
81d3c7351b Merge pull request #15194 from Security-Onion-Solutions/reyesj2/ea-policy
move off of cmd.script with args \
2025-11-03 17:16:35 -05:00
Josh Patterson
ccb8ffd6eb Update install_agent_grid.sls 2025-11-03 17:05:48 -05:00
Josh Patterson
60228ec6e6 Merge pull request #15193 from Security-Onion-Solutions/salt300616
Salt 3006.16
2025-11-03 16:02:25 -05:00
Josh Patterson
574703e551 unlock/lock salt-cloud if installed 2025-11-03 15:39:19 -05:00
Josh Patterson
fa154f1a8f update salt cloud config if configured 2025-11-03 14:12:19 -05:00
reyesj2
635545630b strelka use single master image 2025-11-03 09:36:46 -06:00
Mike Reeves
df8afda999 Merge pull request #15188 from Security-Onion-Solutions/cogburn/multiple-models
Available Models
2025-11-03 09:39:16 -05:00
Corey Ogburn
f80b090c93 Update limits 2025-10-31 14:48:30 -06:00
Corey Ogburn
806173f7e3 Available Models
Utilizes Jason's new Array of Objects UI.
2025-10-31 14:07:30 -06:00
14 changed files with 90 additions and 43 deletions

View File

@@ -395,7 +395,7 @@ is_manager_node() {
 }
 
 is_sensor_node() {
-  # Check to see if this is a sensor (forward) node
+  # Check to see if this is a sensor node
   is_single_node_grid && return 0
   grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
 }
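
Note: only the comment changes here; the role check itself still treats heavy nodes as sensor-class. A quick spot-check on a minion (a sketch, assuming the role grain is present in /etc/salt/grains):

    # Prints the role line and a label if this node is sensor-class (sensor or heavynode)
    grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" && echo "sensor-class node"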

View File

@@ -62,8 +62,6 @@ container_list() {
     "so-soc"
     "so-steno"
     "so-strelka-backend"
-    "so-strelka-filestream"
-    "so-strelka-frontend"
     "so-strelka-manager"
     "so-suricata"
     "so-telegraf"

View File

@@ -2,8 +2,10 @@
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
 
-{%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
-{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
+{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
+{% if grains.role == 'so-heavynode' %}
+{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
+{% endif %}
 
 {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
 {% if not AGENT_STATUS %}

@@ -15,19 +17,13 @@ pull_agent_installer:
     - mode: 755
     - makedirs: True
 
-{% if grains.role not in ['so-heavynode'] %}
 run_installer:
   cmd.run:
-    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENGENERAL }}
+    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
     - cwd: /opt/so
-    - retry: True
-{% else %}
-run_installer:
-  cmd.run:
-    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENHEAVY }}
-    - cwd: /opt/so
-    - retry: True
-{% endif %}
+    - retry:
+        attempts: 3
+        interval: 20
 
 cleanup_agent_installer:
   file.absent:
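
The refactor collapses the duplicated heavynode branch into one state by resolving GRIDNODETOKEN up front, and replaces the bare retry: True with explicit retry behavior. For reference, Salt's per-state retry option also accepts until and splay; a minimal sketch of the general form (values are illustrative):

    some_state:
      cmd.run:
        - name: /usr/bin/true
        - retry:
            attempts: 3   # maximum number of runs
            interval: 20  # seconds between runs
            splay: 5      # random extra delay, up to this many seconds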

View File

@@ -41,13 +41,13 @@ create_temp_file() {
 }
 
 log_title() {
-  if [ $1 == "LOG" ]; then
+  if [ "$1" == "LOG" ]; then
     echo -e "\n${BOLD}================ $2 ================${NC}\n"
-  elif [ $1 == "OK" ]; then
+  elif [ "$1" == "OK" ]; then
     echo -e "${GREEN} $2 ${NC}"
-  elif [ $1 == "WARN" ]; then
+  elif [ "$1" == "WARN" ]; then
     echo -e "${YELLOW} $2 ${NC}"
-  elif [ $1 == "ERROR" ]; then
+  elif [ "$1" == "ERROR" ]; then
     echo -e "${RED} $2 ${NC}"
   fi
 }

@@ -756,7 +756,7 @@ if [ "$should_trigger_recommendations" = true ]; then
     ilm_output=$(so-elasticsearch-query "${index}/_ilm/explain" --fail 2>/dev/null) || true
     if [ -n "$ilm_output" ]; then
-      policy=$(echo "$ilm_output" | jq -r ".indices.\"$index\".policy // empty" 2>/dev/null)
+      policy=$(echo "$ilm_output" | jq --arg idx "$index" -r ".indices[$idx].policy // empty" 2>/dev/null)
     fi
     if [ -n "$policy" ] && [ -n "${policy_ages[$policy]:-}" ]; then
       delete_min_age=${policy_ages[$policy]}

@@ -1134,8 +1134,9 @@ else
   for i in "${!scheduled_indices_names[@]}"; do
     sorted_indices+=("${scheduled_indices_days[$i]}|${scheduled_indices_names[$i]}|${scheduled_indices_sizes[$i]}")
   done
+  OLD_IFS="$IFS"
   IFS=$'\n' sorted_indices=($(sort -t'|' -k1 -n <<<"${sorted_indices[*]}"))
-  unset IFS
+  IFS="$OLD_IFS"
   for entry in "${sorted_indices[@]}"; do
     IFS='|' read -r days_until index_name size_bytes <<< "$entry"
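
The jq change passes the index name in with --arg instead of splicing it into the filter string, so index names containing quotes or other special characters can't break the program. One caution on the general idiom (not a claim about this commit): the jq program should be single-quoted so the shell doesn't expand $idx before jq sees it. A self-contained example:

    index='my.index-2025.11.10'
    json='{"indices":{"my.index-2025.11.10":{"policy":"so-logs"}}}'
    echo "$json" | jq --arg idx "$index" -r '.indices[$idx].policy // empty'   # prints: so-logs

The IFS change likewise saves and restores the original field separator instead of unsetting it, since unset IFS restores default word splitting rather than whatever value was previously in effect.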

View File

@@ -23,6 +23,7 @@ TOPFILE=/opt/so/saltstack/default/salt/top.sls
 BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
 SALTUPGRADED=false
 SALT_CLOUD_INSTALLED=false
+SALT_CLOUD_CONFIGURED=false
 
 # used to display messages to the user at the end of soup
 declare -a FINAL_MESSAGE_QUEUE=()

@@ -1270,6 +1271,10 @@ upgrade_salt() {
   if rpm -q salt-cloud &>/dev/null; then
     SALT_CLOUD_INSTALLED=true
   fi
+  # Check if salt-cloud is configured
+  if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
+    SALT_CLOUD_CONFIGURED=true
+  fi
 
   echo "Removing yum versionlock for Salt."
   echo ""

@@ -1587,7 +1592,7 @@ main() {
   # ensure the mine is updated and populated before highstates run, following the salt-master restart
   update_salt_mine
 
-  if [[ $SALT_CLOUD_INSTALLED == true && $SALTUPGRADED == true ]]; then
+  if [[ $SALT_CLOUD_CONFIGURED == true && $SALTUPGRADED == true ]]; then
     echo "Updating salt-cloud config to use the new Salt version"
     salt-call state.apply salt.cloud.config concurrent=True
   fi

@@ -1674,7 +1679,7 @@ This appears to be a distributed deployment. Other nodes should update themselves
 Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
 
-If it looks like you're missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
+If it looks like you're missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Sensor nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
 
 For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.
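
The soup change distinguishes "salt-cloud package installed" from "salt-cloud actually configured", and only re-applies the cloud config when the profile file exists. The pattern, in isolation (a sketch using the same paths as the diff):

    if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
      salt-call state.apply salt.cloud.config concurrent=True
    fi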

View File

@@ -7,7 +7,7 @@ pcap:
     description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting.
     helpLink: stenographer.html
   diskfreepercentage:
-    description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you'll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
+    description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated Sensor nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you'll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
     helpLink: stenographer.html
   blocks:
     description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.

View File

@@ -36,6 +36,11 @@ cloud_profiles:
         SALTVERSION: {{ SALTVERSION }}
     - template: jinja
     - makedirs: True
+{% else %}
+no_hypervisors_configured:
+  test.succeed_without_changes:
+    - name: no_hypervisors_configured
+    - comment: No hypervisors are configured
 {% endif %}
 {% else %}
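
test.succeed_without_changes is a built-in Salt state that reports success without doing anything, so the new else branch gives the rendered sls at least one state when no hypervisors are defined, instead of producing an empty render. A generic sketch of the pattern (the condition and path are placeholders):

    {% if hypervisors %}
    deploy_profiles:
      file.managed:
        - name: /etc/salt/cloud.profiles.d/socloud.conf
    {% else %}
    nothing_to_do:
      test.succeed_without_changes:
        - comment: No hypervisors are configured
    {% endif %}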

View File

@@ -2552,9 +2552,27 @@ soc:
       assistant:
         enabled: false
         investigationPrompt: Investigate Alert ID {socId}
-        contextLimitSmall: 200000
-        contextLimitLarge: 1000000
         thresholdColorRatioLow: 0.5
         thresholdColorRatioMed: 0.75
         thresholdColorRatioMax: 1
-        lowBalanceColorAlert: 500000
+        availableModels:
+          - id: sonnet-4
+            displayName: Claude Sonnet 4
+            contextLimitSmall: 200000
+            contextLimitLarge: 1000000
+            lowBalanceColorAlert: 500000
+          - id: sonnet-4.5
+            displayName: Claude Sonnet 4.5
+            contextLimitSmall: 200000
+            contextLimitLarge: 1000000
+            lowBalanceColorAlert: 500000
+          - id: gptoss-120b
+            displayName: GPT-OSS 120B
+            contextLimitSmall: 128000
+            contextLimitLarge: 128000
+            lowBalanceColorAlert: 500000
+          - id: qwen-235b
+            displayName: QWEN 235B
+            contextLimitSmall: 256000
+            contextLimitLarge: 256000
+            lowBalanceColorAlert: 500000
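
Each model entry now carries its own context limits and low-balance threshold, replacing the single global contextLimitSmall/contextLimitLarge/lowBalanceColorAlert values. Adding another model is a matter of appending an entry with the same shape (hypothetical entry, illustrative values):

          - id: my-local-model
            displayName: My Local Model
            contextLimitSmall: 128000
            contextLimitLarge: 128000
            lowBalanceColorAlert: 500000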

View File

@@ -606,14 +606,6 @@ soc:
       investigationPrompt:
         description: Prompt given to Onion AI when beginning an investigation.
         global: True
-      contextLimitSmall:
-        description: Smaller context limit for Onion AI.
-        global: True
-        advanced: True
-      contextLimitLarge:
-        description: Larger context limit for Onion AI.
-        global: True
-        advanced: True
       thresholdColorRatioLow:
         description: Lower visual context color change threshold.
         global: True

@@ -630,6 +622,32 @@
         description: Onion AI credit amount at which balance turns red.
         global: True
         advanced: True
+      availableModels:
+        description: List of AI models available for use in SOC as well as model specific warning thresholds.
+        global: True
+        advanced: True
+        forcedType: "[]{}"
+        helpLink: assistant.html
+        syntax: json
+        uiElements:
+          - field: id
+            label: Model ID
+            required: True
+          - field: displayName
+            label: Display Name
+            required: True
+          - field: contextLimitSmall
+            label: Context Limit (Small)
+            forcedType: int
+            required: True
+          - field: contextLimitLarge
+            label: Context Limit (Large)
+            forcedType: int
+            required: True
+          - field: lowBalanceColorAlert
+            label: Low Balance Color Alert
+            forcedType: int
+            required: True
       apiTimeoutMs:
         description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
         global: True
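
The forcedType of "[]{}" together with syntax: json marks this setting as an array of objects, and each uiElements entry describes one editable field in SOC's grid editor (the "Array of Objects UI" from the commit message). Serialized, a single entry would be a JSON object shaped like the defaults above (an assumed shape, not confirmed by the diff):

    [{"id": "sonnet-4", "displayName": "Claude Sonnet 4", "contextLimitSmall": 200000, "contextLimitLarge": 1000000, "lowBalanceColorAlert": 500000}]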

View File

@@ -14,7 +14,7 @@ include:
 
 strelka_filestream:
   docker_container.running:
-    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-filestream:{{ GLOBALS.so_version }}
+    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
     - binds:
       - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
       - /nsm/strelka:/nsm/strelka

View File

@@ -14,7 +14,7 @@ include:
 
 strelka_frontend:
   docker_container.running:
-    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-frontend:{{ GLOBALS.so_version }}
+    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
     - binds:
       - /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
       - /nsm/strelka/log/:/var/log/strelka/:rw
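
Both the filestream and frontend containers now start from the single so-strelka-manager image; their behavior is still differentiated by the role-specific config directories bind-mounted into /etc/strelka. A quick post-deploy check (a sketch; assumes both containers are running):

    docker inspect --format '{{.Config.Image}}' so-strelka-filestream so-strelka-frontend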

View File

@@ -1646,6 +1646,12 @@ reserve_ports() {
   fi
 }
 
+clear_previous_setup_results() {
+  # Disregard previous setup outcomes.
+  rm -f /root/failure
+  rm -f /root/success
+}
+
 reinstall_init() {
   info "Putting system in state to run setup again"

@@ -1657,10 +1663,6 @@
   local service_retry_count=20
 
-  # Disregard previous install outcomes
-  rm -f /root/failure
-  rm -f /root/success
-
   {
     # remove all of root's cronjobs
     logCmd "crontab -r -u root"

View File

@@ -132,6 +132,10 @@ if [[ -f /root/accept_changes ]]; then
   reset_proxy
 fi
 
+# Previous setup attempts, even if setup doesn't actually start the installation,
+# can leave behind results that may interfere with the current setup attempt.
+clear_previous_setup_results
+
 title "Parsing Username for Install"
 parse_install_username

View File

@@ -676,8 +676,8 @@ whiptail_install_type_dist_existing() {
 EOM
 
   install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \
-    "SENSOR" "Create a forward only sensor " \
-    "SEARCHNODE" "Add a search node with parsing " \
+    "SENSOR" "Add a Sensor Node for monitoring network traffic " \
+    "SEARCHNODE" "Add a Search Node with parsing " \
     "FLEET" "Dedicated Elastic Fleet Node " \
     "HEAVYNODE" "Sensor + Search Node " \
     "IDH" "Intrusion Detection Honeypot Node " \