mirror of https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-02-20 14:05:26 +01:00

Compare commits: reyesj2-pa ... reyesj2/mn (38 commits)

256c1122c3, aa2a1a3d3c, 0ebd8e4d6c, 3349c1a936, 7dfd212519, b8fb0fa735, e6f767b613, d00fb4ccf7,
a29eff37a0, 4c86275cd6, a1c806a944, 3d1a2c12ec, 8538e5572e, 9b525612a8, fb364aec5d, ed014b431e,
82ca64d66f, 7e0fb73fec, c28bcfa85e, be6d94d65b, ada463320b, 2b05583035, 4d6b2de374, 41d94b6bfd,
2d74002e9e, 04a757dde0, e7e379ce82, fe0178b8ac, 0661c3af1a, 4778bd6680, 5033462098, 6b4b1d74fd,
f0df6a171c, dc4cd93c02, 19157aa76c, 1c092bf791, ff8790b35b, c6168c1487
@@ -13,7 +13,7 @@
 {% endif %}
 
 {% if PCAPBPF %}
-{% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
+{% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
 {% if PCAP_BPF_CALC['retcode'] == 0 %}
 {% set PCAP_BPF_STATUS = 1 %}
 {% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
@@ -9,7 +9,7 @@
 {% set SURICATABPF = BPFMERGED.suricata %}
 
 {% if SURICATABPF %}
-{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
+{% set SURICATA_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
 {% if SURICATA_BPF_CALC['retcode'] == 0 %}
 {% set SURICATA_BPF_STATUS = 1 %}
 {% endif %}
@@ -9,7 +9,7 @@
 {% set ZEEKBPF = BPFMERGED.zeek %}
 
 {% if ZEEKBPF %}
-{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
+{% set ZEEK_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
 {% if ZEEK_BPF_CALC['retcode'] == 0 %}
 {% set ZEEK_BPF_STATUS = 1 %}
 {% endif %}
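The three hunks above replace cmd.run_all, which expects /usr/sbin/so-bpf-compile to already be installed on the sensor, with cmd.script, which fetches the compile script from the Salt fileserver before executing it. A minimal sketch of the same two execution-module calls made from Python on a minion (interface name and filter text are placeholder values):

```python
import salt.client

caller = salt.client.Caller()  # local minion-side client, the same class the engine below uses

# cmd.run_all: the compiled binary/script must already exist on the minion's filesystem.
local = caller.cmd('cmd.run_all',
                   '/usr/sbin/so-bpf-compile eth0 not port 443',
                   cwd='/root')

# cmd.script: the script is fetched from salt:// (the master's fileserver) and then run,
# so the minion no longer needs its own copy of so-bpf-compile.
fetched = caller.cmd('cmd.script',
                     'salt://common/tools/sbin/so-bpf-compile',
                     'eth0 not port 443',
                     cwd='/root')

for result in (local, fetched):
    if result['retcode'] == 0:
        print(result['stdout'])   # compiled BPF on success, mirroring the retcode checks above
    else:
        print(result['stderr'])
```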
@@ -84,13 +84,6 @@ elasticsearch:
 custom008: *pipelines
 custom009: *pipelines
 custom010: *pipelines
-managed_integrations:
-  description: List of integrations to add into SOC config UI. Enter the full or partial integration name. e.g. 1password, 1pass
-  forcedType: "[]string"
-  multiline: True
-  global: True
-  advanced: True
-  helpLink: elasticsearch.html
 index_settings:
 global_overrides:
 index_template:
@@ -32,7 +32,7 @@ global:
 readonly: True
 advanced: True
 url_base:
-  description: Used for handling of authentication cookies.
+  description: The base URL for the Security Onion Console. Must be accessible by all nodes in the grid, as well as all analysts. Also used for handling of authentication cookies. Can be an IP address or a hostname/FQDN. Do not include protocol (http/https) or port number.
 global: True
 airgap:
   description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
@@ -4,7 +4,7 @@
 # Elastic License 2.0.
 
 {# Managed elasticsearch/soc_elasticsearch.yaml file for adding integration configuration items to UI #}
-{% set managed_integrations = salt['pillar.get']('elasticsearch:managed_integrations', []) %}
+{% set managed_integrations = salt['pillar.get']('manager:managed_integrations', []) %}
 {% if managed_integrations and salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
 {% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
 {% set addon_integration_keys = ADDON_INTEGRATION_DEFAULTS.keys() %}
@@ -78,3 +78,10 @@ manager:
 advanced: True
 helpLink: elastic-fleet.html
 forcedType: int
+managed_integrations:
+  description: List of integrations to add into SOC config UI. Enter the full or partial integration name. e.g. 1password, 1pass
+  forcedType: "[]string"
+  multiline: True
+  global: True
+  advanced: True
+  helpLink: elasticsearch.html
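Per the description above, managed_integrations accepts full or partial integration names (e.g. "1pass" for 1password). Below is a hedged Python sketch of what a partial-name filter over the available integration keys could look like; the key list and the substring rule are assumptions for illustration, not the actual jinja logic in integration-defaults.map.jinja:

```python
# Hypothetical integration keys; the real set comes from the Fleet package/component state files.
available_integrations = ['1password', 'apache_http_server', 'aws_cloudtrail', 'zscaler_zia']

# Entries as they might appear under manager:managed_integrations in the pillar.
managed_integrations = ['1pass', 'aws_cloudtrail']

# Assumed rule: treat each configured entry as a case-insensitive substring match.
selected = [
    name for name in available_integrations
    if any(pattern.lower() in name.lower() for pattern in managed_integrations)
]

print(selected)  # ['1password', 'aws_cloudtrail']
```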
@@ -93,10 +93,6 @@ check_err() {
     161)
       echo 'Required intermediate Elasticsearch upgrade not complete'
       ;;
-    170)
-      echo "Intermediate upgrade completed successfully to $next_step_so_version, but next soup to Security Onion $originally_requested_so_version could not be started automatically."
-      echo "Start soup again manually to continue the upgrade to Security Onion $originally_requested_so_version."
-      ;;
     *)
       echo 'Unhandled error'
       echo "$err_msg"
@@ -158,7 +154,7 @@ EOF
       echo "Ensure you verify the ISO that you downloaded."
       exit 0
     else
-      echo "Device has been mounted! $(cat /tmp/soagupdate/SecurityOnion/VERSION)"
+      echo "Device has been mounted!"
     fi
   else
     echo "Could not find Security Onion ISO content at ${ISOLOC}"
@@ -701,6 +697,19 @@ post_to_2.4.210() {
   echo "Regenerating Elastic Agent Installers"
   /sbin/so-elastic-agent-gen-installers
 
+  # migrate elasticsearch:managed_integrations pillar to manager:managed_integrations
+  if managed_integrations=$(/usr/sbin/so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.managed_integrations); then
+    local managed_integrations_old_pillar="/tmp/elasticsearch-managed_integrations.yaml"
+
+    echo "Migrating managed_integrations pillar"
+    echo -e "$managed_integrations" > "$managed_integrations_old_pillar"
+
+    /usr/sbin/so-yaml.py add /opt/so/saltstack/local/pillar/manager/soc_manager.sls manager.managed_integrations file:$managed_integrations_old_pillar > /dev/null 2>&1
+
+    /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.managed_integrations
+  fi
+
   POSTVERSION=2.4.210
 }
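The migration block above shells out to so-yaml.py three times: get the old elasticsearch.managed_integrations value, add it under manager.managed_integrations, then remove the old key. As a rough illustration of what that get/add/remove sequence accomplishes, here is a hedged PyYAML sketch that moves a dotted key between two YAML files; the move_key helper is hypothetical and is not the so-yaml.py implementation:

```python
import yaml  # PyYAML

def move_key(src_path, src_key, dst_path, dst_key):
    """Move a dotted key (e.g. 'elasticsearch.managed_integrations') from one YAML file to another."""
    with open(src_path) as f:
        src = yaml.safe_load(f) or {}

    # Walk the dotted path in the source document and pop the value.
    node, parts = src, src_key.split('.')
    for part in parts[:-1]:
        node = node.get(part, {})
    value = node.pop(parts[-1], None)
    if value is None:
        return  # nothing to migrate

    with open(dst_path) as f:
        dst = yaml.safe_load(f) or {}

    # Create the destination path and attach the migrated value.
    target, parts = dst, dst_key.split('.')
    for part in parts[:-1]:
        target = target.setdefault(part, {})
    target[parts[-1]] = value

    with open(src_path, 'w') as f:
        yaml.safe_dump(src, f, default_flow_style=False)
    with open(dst_path, 'w') as f:
        yaml.safe_dump(dst, f, default_flow_style=False)

move_key('/opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls',
         'elasticsearch.managed_integrations',
         '/opt/so/saltstack/local/pillar/manager/soc_manager.sls',
         'manager.managed_integrations')
```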
@@ -988,7 +997,9 @@ up_to_2.4.210() {
  # Elastic Update for this release, so download Elastic Agent files
  determine_elastic_agent_upgrade
  create_ca_pillar

  # This state is used to deal with the breaking change introduced in 3006.17 - https://docs.saltproject.io/en/3006/topics/releases/3006.17.html
  # This is the only way the state is called so we can use concurrent=True
  salt-call state.apply salt.master.add_minimum_auth_version --file-root=$UPDATE_DIR/salt --local concurrent=True
  INSTALLEDVERSION=2.4.210
 }
@@ -1686,218 +1697,115 @@ verify_latest_update_script() {
|
||||
|
||||
verify_es_version_compatibility() {
|
||||
|
||||
local es_required_version_statefile_base="/opt/so/state/so_es_required_upgrade_version"
|
||||
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
|
||||
local is_active_intermediate_upgrade=1
|
||||
# supported upgrade paths for SO-ES versions
|
||||
declare -A es_upgrade_map=(
|
||||
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
|
||||
["8.17.3"]="8.18.4 8.18.6 8.18.8"
|
||||
["8.18.4"]="8.18.6 8.18.8 9.0.8"
|
||||
["8.18.6"]="8.18.8 9.0.8"
|
||||
["8.18.8"]="9.0.8"
|
||||
)
|
||||
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
|
||||
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
|
||||
# supported upgrade paths for SO-ES versions
|
||||
declare -A es_upgrade_map=(
|
||||
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
|
||||
["8.17.3"]="8.18.4 8.18.6 8.18.8"
|
||||
["8.18.4"]="8.18.6 8.18.8 9.0.8"
|
||||
["8.18.6"]="8.18.8 9.0.8"
|
||||
["8.18.8"]="9.0.8"
|
||||
)
|
||||
|
||||
# Elasticsearch MUST upgrade through these versions
|
||||
declare -A es_to_so_version=(
|
||||
["8.18.8"]="2.4.190-20251024"
|
||||
)
|
||||
# Elasticsearch MUST upgrade through these versions
|
||||
declare -A es_to_so_version=(
|
||||
["8.18.8"]="2.4.190-20251024"
|
||||
)
|
||||
|
||||
# Get current Elasticsearch version
|
||||
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
|
||||
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
|
||||
else
|
||||
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
|
||||
# Get current Elasticsearch version
|
||||
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
|
||||
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
|
||||
else
|
||||
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
|
||||
exit 160
|
||||
fi
|
||||
|
||||
exit 160
|
||||
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
|
||||
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
|
||||
|
||||
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
|
||||
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
|
||||
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
|
||||
exit 160
|
||||
fi
|
||||
|
||||
if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
|
||||
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
|
||||
# allow upgrade to version < 2.4.110 without checking ES version compatibility
|
||||
return 0
|
||||
|
||||
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
|
||||
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
|
||||
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
|
||||
fi
|
||||
|
||||
exit 160
|
||||
fi
|
||||
|
||||
# allow upgrade to version < 2.4.110 without checking ES version compatibility
|
||||
return 0
|
||||
else
|
||||
target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
|
||||
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
|
||||
if [[ -f "$es_required_version_statefile" ]]; then
|
||||
# required so verification script should have already been created
|
||||
if [[ ! -f "$es_verification_script" ]]; then
|
||||
create_intermediate_upgrade_verification_script $es_verification_script
|
||||
fi
|
||||
|
||||
for statefile in "${es_required_version_statefile_base}"-*; do
|
||||
[[ -f $statefile ]] || continue
|
||||
|
||||
local es_required_version_statefile_value=$(cat "$statefile")
|
||||
|
||||
if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
|
||||
echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
|
||||
is_active_intermediate_upgrade=0
|
||||
continue
|
||||
fi
|
||||
|
||||
# use sort to check if es_required_statefile_value is < the current es_version.
|
||||
if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
|
||||
rm -f "$statefile"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ ! -f "$es_verification_script" ]]; then
|
||||
create_intermediate_upgrade_verification_script "$es_verification_script"
|
||||
fi
|
||||
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
|
||||
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
|
||||
|
||||
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
|
||||
|
||||
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
|
||||
|
||||
exit 161
|
||||
fi
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
done
|
||||
|
||||
# if current soup is an intermediate upgrade we can skip the upgrade map check below
|
||||
if [[ $is_active_intermediate_upgrade -eq 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
|
||||
# supported upgrade
|
||||
return 0
|
||||
else
|
||||
compatible_versions=${es_upgrade_map[$es_version]}
|
||||
if [[ -z "$compatible_versions" ]]; then
|
||||
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
|
||||
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
|
||||
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
|
||||
next_step_so_version=${es_to_so_version[$first_es_required_version]}
|
||||
required_es_upgrade_version="$first_es_required_version"
|
||||
else
|
||||
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
|
||||
required_es_upgrade_version="${compatible_versions##* }"
|
||||
fi
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
|
||||
|
||||
es_required_version_statefile="${es_required_version_statefile_base}-${required_es_upgrade_version}"
|
||||
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
|
||||
|
||||
# We expect to upgrade to the latest compatible minor version of ES
|
||||
create_intermediate_upgrade_verification_script "$es_verification_script"
|
||||
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
run_airgap_intermediate_upgrade
|
||||
else
|
||||
if [[ ! -z $ISOLOC ]]; then
|
||||
originally_requested_iso_location="$ISOLOC"
|
||||
fi
|
||||
# Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
|
||||
unset ISOLOC
|
||||
|
||||
run_network_intermediate_upgrade
|
||||
fi
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
run_airgap_intermediate_upgrade() {
|
||||
local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
|
||||
# preserve ISOLOC value, so we can try to use it post intermediate upgrade
|
||||
local originally_requested_iso_location="$ISOLOC"
|
||||
|
||||
# make sure a fresh ISO gets mounted
|
||||
unmount_update
|
||||
|
||||
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
|
||||
echo -e "\nIf you have the next ISO / USB ready, enter the path now eg. /dev/sdd, /home/onion/securityonion-$next_step_so_version.iso:"
|
||||
|
||||
while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
|
||||
# List removable devices if any are present
|
||||
local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
|
||||
if [[ -n "$removable_devices" ]]; then
|
||||
echo "PATH SIZE TYPE MOUNTPOINTS RM"
|
||||
echo "$removable_devices"
|
||||
fi
|
||||
|
||||
read -rp "Device/ISO Path (or 'exit' to quit): " next_iso_location
|
||||
if [[ "${next_iso_location,,}" == "exit" ]]; then
|
||||
echo "Exiting soup. Before reattempting to upgrade to $originally_requested_so_version, please first upgrade to $next_step_so_version to ensure Elasticsearch can properly update through the required versions."
|
||||
|
||||
exit 160
|
||||
fi
|
||||
|
||||
if [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; then
|
||||
echo "$next_iso_location is not a valid file or block device."
|
||||
next_iso_location=""
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Using $next_iso_location for required intermediary upgrade."
|
||||
exec bash <<EOF
|
||||
ISOLOC=$next_iso_location soup -y && \
|
||||
ISOLOC=$next_iso_location soup -y && \
|
||||
|
||||
echo -e "\n##############################################################################################################################\n" && \
|
||||
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
|
||||
|
||||
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
|
||||
|
||||
echo -e "\n##############################################################################################################################\n" && \
|
||||
|
||||
# automatically start the next soup if the original ISO isn't using the same block device we just used
|
||||
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
|
||||
umount /tmp/soagupdate
|
||||
ISOLOC=$originally_requested_iso_location soup -y && \
|
||||
ISOLOC=$originally_requested_iso_location soup -y
|
||||
else
|
||||
echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \
|
||||
|
||||
exit 170
|
||||
fi
|
||||
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
EOF
|
||||
}
|
||||
|
||||
run_network_intermediate_upgrade() {
|
||||
# preserve BRANCH value if set originally
|
||||
if [[ -n "$BRANCH" ]]; then
|
||||
local originally_requested_so_branch="$BRANCH"
|
||||
else
|
||||
local originally_requested_so_branch="2.4/main"
|
||||
fi
|
||||
|
||||
echo "Starting automated intermediate upgrade to $next_step_so_version."
|
||||
echo "After completion, the system will automatically attempt to upgrade to the latest version."
|
||||
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
exec bash << EOF
|
||||
BRANCH=$next_step_so_version soup -y && \
|
||||
BRANCH=$next_step_so_version soup -y && \
|
||||
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!"
|
||||
# create script using version in statefile
|
||||
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
|
||||
|
||||
echo -e "\n##############################################################################################################################\n" && \
|
||||
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
|
||||
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
|
||||
|
||||
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
|
||||
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
|
||||
exit 161
|
||||
fi
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
fi
|
||||
|
||||
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
|
||||
# supported upgrade
|
||||
return 0
|
||||
else
|
||||
compatible_versions=${es_upgrade_map[$es_version]}
|
||||
if [[ -z "$compatible_versions" ]]; then
|
||||
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
|
||||
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
|
||||
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
|
||||
next_step_so_version=${es_to_so_version[$first_es_required_version]}
|
||||
required_es_upgrade_version="$first_es_required_version"
|
||||
else
|
||||
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
|
||||
required_es_upgrade_version="${compatible_versions##* }"
|
||||
fi
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
|
||||
|
||||
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
|
||||
|
||||
# We expect to upgrade to the latest compatible minor version of ES
|
||||
create_intermediate_upgrade_verification_script $es_verification_script
|
||||
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
|
||||
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
exit 160
|
||||
else
|
||||
# preserve BRANCH value if set originally
|
||||
if [[ -n "$BRANCH" ]]; then
|
||||
local originally_requested_so_version="$BRANCH"
|
||||
else
|
||||
local originally_requested_so_version="2.4/main"
|
||||
fi
|
||||
|
||||
echo "Starting automated intermediate upgrade to $next_step_so_version."
|
||||
echo "After completion, the system will automatically attempt to upgrade to the latest version."
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
|
||||
echo -e \"\n##############################################################################################################################\n\" && \
|
||||
echo -e \"Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
|
||||
&& timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
|
||||
echo -e \"\n##############################################################################################################################\n\" \
|
||||
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -e "\n##############################################################################################################################\n" && \
|
||||
if [[ -n "$originally_requested_iso_location" ]]; then
|
||||
# nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
|
||||
ISOLOC=$originally_requested_iso_location soup -y && \
|
||||
ISOLOC=$originally_requested_iso_location soup -y
|
||||
else
|
||||
BRANCH=$originally_requested_so_branch soup -y && \
|
||||
BRANCH=$originally_requested_so_branch soup -y
|
||||
fi
|
||||
echo -e "\n##############################################################################################################################\n"
|
||||
EOF
|
||||
}
|
||||
|
||||
create_intermediate_upgrade_verification_script() {
|
||||
@@ -2128,7 +2036,6 @@ main() {
|
||||
echo "Verifying we have the latest soup script."
|
||||
verify_latest_update_script
|
||||
|
||||
echo "Verifying Elasticsearch version compatibility before upgrading."
|
||||
verify_es_version_compatibility
|
||||
|
||||
echo "Let's see if we need to update Security Onion."
|
||||
|
||||
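The compatibility check above keys the current Elasticsearch version into es_upgrade_map and, when the target version is not a supported hop, picks the required intermediate ES version (and its matching Security Onion release) from es_to_so_version; version ordering in the script itself is done with sort -V. A small Python sketch of that decision logic, assuming the same hard-coded maps (illustrative only, not the soup code):

```python
# Supported direct upgrade paths, mirroring es_upgrade_map in soup.
ES_UPGRADE_MAP = {
    '8.14.3': ['8.17.3', '8.18.4', '8.18.6', '8.18.8'],
    '8.17.3': ['8.18.4', '8.18.6', '8.18.8'],
    '8.18.4': ['8.18.6', '8.18.8', '9.0.8'],
    '8.18.6': ['8.18.8', '9.0.8'],
    '8.18.8': ['9.0.8'],
}

# Elasticsearch versions that force an intermediate Security Onion release.
ES_TO_SO_VERSION = {'8.18.8': '2.4.190-20251024'}

def version_key(v):
    """Rough equivalent of `sort -V` for dotted versions (illustrative)."""
    return [int(part) for part in v.split('-')[0].split('.')]

def plan_upgrade(current_es, target_es):
    if target_es == current_es or target_es in ES_UPGRADE_MAP.get(current_es, []):
        return None  # direct upgrade is supported, nothing extra to do
    compatible = ES_UPGRADE_MAP.get(current_es)
    if not compatible:
        # Unknown current version: fall back to the lowest ES version pinned in ES_TO_SO_VERSION.
        required = min(ES_TO_SO_VERSION, key=version_key)
    else:
        # Otherwise step through the highest compatible version first (last entry in the map).
        required = compatible[-1]
    return required, ES_TO_SO_VERSION.get(required)

print(plan_upgrade('8.14.3', '9.0.8'))  # ('8.18.8', '2.4.190-20251024')
```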
salt/salt/engines/master/minimum_auth_version.py (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
import salt.client
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
TIMESTAMP_FILE = '/opt/so/state/mav_engine_start_time'
|
||||
|
||||
def _get_start_time():
|
||||
"""Read persisted start time from file, or create one if it doesn't exist."""
|
||||
if os.path.exists(TIMESTAMP_FILE):
|
||||
with open(TIMESTAMP_FILE, 'r') as f:
|
||||
timestamp = f.read().strip()
|
||||
start_time = datetime.fromisoformat(timestamp)
|
||||
log.info("Loaded existing start time from %s: %s", TIMESTAMP_FILE, start_time)
|
||||
return start_time
|
||||
|
||||
start_time = datetime.now()
|
||||
with open(TIMESTAMP_FILE, 'w') as f:
|
||||
f.write(start_time.isoformat())
|
||||
log.info("No existing start time found. Persisted new start time: %s", start_time)
|
||||
return start_time
|
||||
|
||||
|
||||
def _clear_start_time():
|
||||
"""Remove the persisted timestamp file after successful completion."""
|
||||
if os.path.exists(TIMESTAMP_FILE):
|
||||
os.remove(TIMESTAMP_FILE)
|
||||
log.info("Removed timestamp file %s", TIMESTAMP_FILE)
|
||||
|
||||
|
||||
def start(wait_days=7):
|
||||
"""
|
||||
This engine waits for the specified number of days, then changes minimum_auth_version.
|
||||
|
||||
Args:
|
||||
wait_days: Days to wait before taking action (default: 7)
|
||||
"""
|
||||
log.info(
|
||||
"Starting minimum_auth_version engine - Wait time: %d days",
|
||||
wait_days
|
||||
)
|
||||
|
||||
start_time = _get_start_time()
|
||||
wait_delta = timedelta(days=wait_days)
|
||||
mav_removed = False
|
||||
caller = salt.client.Caller()
|
||||
|
||||
while True:
|
||||
if not mav_removed:
|
||||
elapsed = datetime.now() - start_time
|
||||
|
||||
if elapsed >= wait_delta:
|
||||
log.info("Changing minimum_auth_version")
|
||||
_clear_start_time()
|
||||
result = caller.cmd('state.apply', 'salt.master.remove_minimum_auth_version', queue=True)
|
||||
# We shouldn't reach this line since the above line should remove the engine and restart salt-master
|
||||
log.info("State apply result: %s", result)
|
||||
mav_removed = True
|
||||
else:
|
||||
target_time = start_time + wait_delta
|
||||
log.info("minimum_auth_version will be changed within an hour of %s", target_time.strftime('%m-%d-%Y %H:%M'))
|
||||
|
||||
time.sleep(3600) # Check hourly
|
||||
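The engine persists its start time to /opt/so/state/mav_engine_start_time so that a salt-master restart does not reset the seven-day wait. A short worked example of the elapsed/target-time arithmetic it performs each hour (dates are illustrative):

```python
from datetime import datetime, timedelta

# Persisted ISO-8601 start time, as written by _get_start_time().
start_time = datetime.fromisoformat('2026-02-01T09:30:00')
wait_delta = timedelta(days=7)

now = datetime(2026, 2, 5, 12, 0, 0)
elapsed = now - start_time

if elapsed >= wait_delta:
    print('Changing minimum_auth_version')
else:
    target_time = start_time + wait_delta
    # Matches the engine's hourly log message format.
    print('minimum_auth_version will be changed within an hour of',
          target_time.strftime('%m-%d-%Y %H:%M'))   # 02-08-2026 09:30
```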
@@ -1,4 +1,4 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   master:
-    version: '3006.16'
+    version: '3006.19'
salt/salt/master/add_minimum_auth_version.sls (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
# This state is to be used during soup preupgrade_changes, and run when the salt-master has been stopped. Soup will later start the salt-master.
|
||||
# This state is used to deal with the breaking change introduced in 3006.17 - https://docs.saltproject.io/en/3006/topics/releases/3006.17.html
|
||||
|
||||
|
||||
set_minimum_auth_version_0:
|
||||
file.managed:
|
||||
- name: /etc/salt/master.d/minimum_auth_version.conf
|
||||
- source: salt://salt/master/files/minimum_auth_version.conf
|
||||
|
||||
add_minimum_auth_version_engine_config:
|
||||
file.managed:
|
||||
- name: /etc/salt/master.d/minimum_auth_version_engine.conf
|
||||
- source: salt://salt/master/files/minimum_auth_version_engine.conf
|
||||
|
||||
add_minimum_auth_version_engine:
|
||||
file.managed:
|
||||
- name: /etc/salt/engines/minimum_auth_version.py
|
||||
- source: salt://salt/engines/master/minimum_auth_version.py
|
||||
salt/salt/master/files/minimum_auth_version.conf (new file, 1 line)
@@ -0,0 +1 @@
|
||||
minimum_auth_version: 0
|
||||
salt/salt/master/files/minimum_auth_version_engine.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
engines:
|
||||
- minimum_auth_version:
|
||||
wait_days: 7
|
||||
salt/salt/master/remove_minimum_auth_version.sls (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
include:
|
||||
- salt.master
|
||||
|
||||
unset_minimum_auth_version_0:
|
||||
file.absent:
|
||||
- name: /etc/salt/master.d/minimum_auth_version.conf
|
||||
|
||||
remove_minimum_auth_version_engine_config:
|
||||
file.absent:
|
||||
- name: /etc/salt/master.d/minimum_auth_version_engine.conf
|
||||
|
||||
remove_minimum_auth_version_engine:
|
||||
file.absent:
|
||||
- name: /etc/salt/engines/minimum_auth_version.py
|
||||
- watch_in:
|
||||
- service: salt_master_service
|
||||
@@ -1,5 +1,5 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   minion:
-    version: '3006.16'
+    version: '3006.19'
     check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
@@ -26,7 +26,7 @@
 #======================================================================================================================
 set -o nounset  # Treat unset variables as an error
 
-__ScriptVersion="2025.09.03"
+__ScriptVersion="2026.01.22"
 __ScriptName="bootstrap-salt.sh"
 
 __ScriptFullName="$0"
@@ -369,7 +369,7 @@ __usage() {
     also be specified. Salt installation will be omitted, but some of the
     dependencies could be installed to write configuration with -j or -J.
   -d  Disables checking if Salt services are enabled to start on system boot.
-      You can also do this by touching ${BS_TMP_DIR}/disable_salt_checks on the target
+      You can also do this by touching ${_TMP_DIR}/disable_salt_checks on the target
       host. Default: \${BS_FALSE}
   -D  Show debug output
   -f  Force shallow cloning for git installations.
@@ -2819,14 +2819,25 @@ __install_salt_from_repo() {
|
||||
${_pip_cmd} install --force-reinstall --break-system-packages "${_arch_dep}"
|
||||
fi
|
||||
|
||||
echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl'"
|
||||
_PIP_VERSION_STRING=$(${_pip_cmd} --version)
|
||||
echodebug "Installed pip version: $_PIP_VERSION_STRING"
|
||||
_PIP_MAJOR_VERSION=$(echo "$_PIP_VERSION_STRING" | sed -E 's/^pip ([0-9]+)\..*/\1/')
|
||||
|
||||
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
|
||||
|
||||
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
|
||||
${_PIP_INSTALL_ARGS} \
|
||||
--global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
|
||||
${_TMP_DIR}/git/deps/salt*.whl || return 1
|
||||
# The following branching can be removed once we no longer support distros that still ship with
|
||||
# versions of `pip` earlier than v22.1 such as Debian 11
|
||||
if [ "$_PIP_MAJOR_VERSION" -lt 23 ]; then
|
||||
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
|
||||
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
|
||||
${_PIP_INSTALL_ARGS} \
|
||||
--global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
|
||||
${_TMP_DIR}/git/deps/salt*.whl || return 1
|
||||
else
|
||||
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --config-settings=--global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
|
||||
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
|
||||
${_PIP_INSTALL_ARGS} \
|
||||
--config-settings="--global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
|
||||
${_TMP_DIR}/git/deps/salt*.whl || return 1
|
||||
fi
|
||||
|
||||
echoinfo "Checking if Salt can be imported using ${_py_exe}"
|
||||
CHECK_SALT_SCRIPT=$(cat << EOM
|
||||
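The branching above selects --global-option for pip releases older than v23 and --config-settings otherwise, since older pip (pre-22.1, per the script's own comment) does not support --config-settings for wheel installs. A minimal Python sketch of that version parse and flag selection; the cutoff follows the script, and the helper itself is illustrative:

```python
import re
import subprocess

def pip_install_flags(pip_cmd='pip3'):
    """Pick --global-option vs --config-settings based on the installed pip major version."""
    version_string = subprocess.run([pip_cmd, '--version'], capture_output=True,
                                    text=True, check=True).stdout
    # e.g. "pip 23.0.1 from /usr/lib/python3/dist-packages/pip (python 3.11)"
    major = int(re.match(r'pip (\d+)\.', version_string).group(1))

    option = '--salt-config-dir=/etc/salt --salt-cache-dir=/var/cache/salt'
    if major < 23:
        return ['--global-option=' + option]                     # legacy pip
    return ['--config-settings=--global-option=' + option]       # newer pip

print(pip_install_flags())
```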
@@ -6096,7 +6107,14 @@ install_arch_linux_git_deps() {
|
||||
}
|
||||
|
||||
install_arch_linux_onedir_deps() {
|
||||
echodebug "install_arch_linux_onedir_deps() entry"
|
||||
|
||||
# Basic tooling for download/verify/extract
|
||||
pacman -Sy --noconfirm --needed wget tar gzip gnupg ca-certificates || return 1
|
||||
|
||||
# Reuse stable deps for python-yaml etc. if you want config_salt() parity
|
||||
install_arch_linux_stable_deps || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
install_arch_linux_stable() {
|
||||
@@ -6111,7 +6129,73 @@ install_arch_linux_stable() {
|
||||
pacman -S --noconfirm --needed bash || return 1
|
||||
pacman -Su --noconfirm || return 1
|
||||
# We can now resume regular salt update
|
||||
pacman -Syu --noconfirm salt || return 1
|
||||
# Except that this hasn't been in arch repos for years;
|
||||
# so we have to build from AUR
|
||||
# We use "buildgirl" because Eve demanded it.
|
||||
build_user=${build_user:-buildgirl}
|
||||
userdel "$build_user" || true
|
||||
useradd -M -r -s /usr/bin/nologin "$build_user"
|
||||
echo "$build_user ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/"$build_user"
|
||||
rm -rf /tmp/yay-bin || true
|
||||
|
||||
git clone https://aur.archlinux.org/salt.git /tmp/yay-bin
|
||||
chown -R "$build_user":"$build_user" /tmp/yay-bin
|
||||
sudo -u "$build_user" env -i \
|
||||
HOME=/tmp \
|
||||
PATH=/usr/bin:/bin:/usr/sbin:/sbin \
|
||||
MAKEFLAGS="-j$(nproc)" \
|
||||
LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 \
|
||||
makepkg -CcsiD /tmp/yay-bin \
|
||||
--noconfirm --needed \
|
||||
--noprogressbar || return 1
|
||||
|
||||
rm -f /etc/sudoers.d/"$build_user"
|
||||
rm -rf /tmp/yay-bin
|
||||
userdel "$build_user"
|
||||
return 0
|
||||
}
|
||||
|
||||
install_arch_linux_onedir() {
|
||||
echodebug "install_arch_linux_onedir() entry"
|
||||
|
||||
version="${ONEDIR_REV:-latest}"
|
||||
arch="x86_64"
|
||||
[ "$(uname -m)" = "aarch64" ] && arch="aarch64"
|
||||
|
||||
# Resolve "latest" to actual version
|
||||
if [ "$version" = "latest" ]; then
|
||||
version=$(wget -qO- https://api.github.com/repos/saltstack/salt/releases/latest \
|
||||
| grep -Eo '"tag_name": *"v[0-9.]+"' \
|
||||
| sed 's/"tag_name": *"v//;s/"//') || return 1
|
||||
fi
|
||||
|
||||
tarball="salt-${version}-onedir-linux-${arch}.tar.xz"
|
||||
url="https://github.com/saltstack/salt/releases/download/v${version}/${tarball}"
|
||||
extractdir="/tmp/salt-${version}-onedir-linux-${arch}"
|
||||
|
||||
echoinfo "Downloading Salt onedir: $url"
|
||||
wget -q "$url" -O "/tmp/${tarball}" || return 1
|
||||
|
||||
# Validate tarball
|
||||
if ! tar -tf "/tmp/${tarball}" >/dev/null 2>&1; then
|
||||
echoerror "Invalid or corrupt onedir tarball"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Prepare extraction
|
||||
rm -rf "$extractdir" || true
|
||||
rm -rf /opt/saltstack/salt || true
|
||||
mkdir -p "$extractdir"
|
||||
|
||||
# Extract and flatten (remove leading 'salt/' directory)
|
||||
# /tmp/salt-${version}-onedir-linux-${arch}
|
||||
tar --strip-components=1 -xf "/tmp/${tarball}" -C "$extractdir"
|
||||
|
||||
# Place into /opt
|
||||
mkdir -p /opt/saltstack/salt
|
||||
mv "$extractdir"/* /opt/saltstack/salt/ || return 1
|
||||
chmod -R 755 /opt/saltstack/salt
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
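install_arch_linux_onedir resolves the "latest" tag through the GitHub releases API, then downloads, validates, and flattens the matching onedir tarball into /opt/saltstack/salt. A hedged Python sketch of the tag resolution and URL construction the function performs; urllib is used here instead of wget, with the same endpoint and naming scheme as the script:

```python
import json
import platform
import urllib.request

def resolve_onedir_url(version='latest'):
    arch = 'aarch64' if platform.machine() == 'aarch64' else 'x86_64'

    if version == 'latest':
        # Same endpoint the bootstrap script queries with wget.
        with urllib.request.urlopen(
                'https://api.github.com/repos/saltstack/salt/releases/latest') as resp:
            version = json.load(resp)['tag_name'].lstrip('v')

    tarball = f'salt-{version}-onedir-linux-{arch}.tar.xz'
    return f'https://github.com/saltstack/salt/releases/download/v{version}/{tarball}'

print(resolve_onedir_url())
```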
@@ -6249,17 +6333,48 @@ install_arch_check_services() {
|
||||
return 0
|
||||
}
|
||||
|
||||
install_arch_linux_onedir() {
|
||||
install_arch_linux_stable || return 1
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
install_arch_linux_onedir_post() {
|
||||
install_arch_linux_post || return 1
|
||||
echodebug "install_arch_linux_onedir_post() entry"
|
||||
|
||||
return 0
|
||||
# Disable any distro/AUR salt units
|
||||
systemctl disable --now salt-minion.service 2>/dev/null || true
|
||||
systemctl disable --now salt-master.service 2>/dev/null || true
|
||||
|
||||
# Drop a clean unit, same pattern as Debian/Ubuntu onedir
|
||||
cat >/etc/systemd/system/salt-minion.service <<'EOF'
|
||||
[Unit]
|
||||
Description=Salt Minion (onedir)
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/opt/saltstack/salt/salt-minion -c /etc/salt
|
||||
Restart=always
|
||||
LimitNOFILE=100000
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
systemctl daemon-reload
|
||||
|
||||
# Add onedir paths system-wide
|
||||
cat >/etc/profile.d/saltstack.sh <<'EOF'
|
||||
export PATH=/opt/saltstack/salt:/opt/saltstack/salt/bin:$PATH
|
||||
EOF
|
||||
|
||||
chmod 644 /etc/profile.d/saltstack.sh
|
||||
|
||||
if [ "$_START_DAEMONS" -eq $BS_TRUE ]; then
|
||||
systemctl enable --now salt-minion.service
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
#
|
||||
# Ended Arch Install Functions
|
||||
#
|
||||
|
||||
@@ -1518,6 +1518,7 @@ soc:
|
||||
anonymousCidr:
|
||||
apiKey:
|
||||
staticrbac:
|
||||
defaultRole: ""
|
||||
roleFiles:
|
||||
- rbac/permissions
|
||||
- rbac/roles
|
||||
@@ -2379,6 +2380,10 @@
 exclusive: true
 enablesToggles:
 - acknowledged
+- name: investigated
+  filter: event.investigated:true
+  enabled: false
+  exclusive: false
 queries:
 - name: 'Group By Name, Module'
   query: '* | groupby rule.name event.module* event.severity_label rule.uuid'
@@ -2662,18 +2667,11 @@
 thresholdColorRatioMax: 1
 availableModels:
 - id: sonnet-4.5
-  displayName: Claude Sonnet 4.5 ($$$)
+  displayName: Claude Sonnet 4.5
   origin: USA
   contextLimitSmall: 200000
   contextLimitLarge: 1000000
   lowBalanceColorAlert: 500000
   enabled: true
   adapter: SOAI
-- id: qwen-235b
-  displayName: QWEN 235B ($)
-  origin: China
-  contextLimitSmall: 256000
-  contextLimitLarge: 256000
-  lowBalanceColorAlert: 500000
-  enabled: true
-  adapter: SOAI
@@ -455,6 +455,11 @@ soc:
 global: True
 advanced: True
 forcedType: int
+staticrbac:
+  defaultRole:
+    description: "Default role for new users that have not been assigned a role. When a role is specified, an attempt will be made to permanently assign the role to the user once the user accesses SOC. The role name must match exactly the name of an existing RBAC role. Standard system roles include: limited-auditor, limited-analyst, auditor, analyst, superuser"
+    global: True
+    advanced: False
 strelkaengine:
 aiRepoUrl:
 description: URL to the AI repository. This is used to pull in AI models for use in Strelka rules.
@@ -660,10 +665,11 @@ soc:
|
||||
global: True
|
||||
advanced: True
|
||||
adapters:
|
||||
description: Configuration for AI adapters used by the Onion AI assistant.
|
||||
description: Configuration for AI adapters used by the Onion AI assistant. Please see documentation for help on which fields are required for which protocols.
|
||||
global: True
|
||||
advanced: True
|
||||
forcedType: "[]{}"
|
||||
helpLink: assistant.html
|
||||
syntax: json
|
||||
uiElements:
|
||||
- field: name
|
||||
@@ -750,6 +756,8 @@ soc:
|
||||
- field: lowBalanceColorAlert
|
||||
label: Low Balance Color Alert
|
||||
forcedType: int
|
||||
- field: adapter
|
||||
label: Adapter
|
||||
required: True
|
||||
- field: enabled
|
||||
label: Enabled
|
||||
|
||||