Compare commits


24 Commits

Author SHA1 Message Date
reyesj2
edf3c9464f add --certs flag to update certs. Used with --force, to ensure certs are updated even if hosts update isn't needed 2025-11-25 16:16:19 -06:00
reyesj2
5a8ea57a1b move off of cmd.script with args
https://github.com/saltstack/salt/issues/68298
2025-11-03 15:31:14 -06:00
Josh Patterson
2f6c1b82a6 Merge pull request #15185 from Security-Onion-Solutions/salt300616
Upgrade Salt 3006.16
2025-10-31 09:47:01 -04:00
Josh Patterson
b8c2808abe update salt-cloud profile after new code copied 2025-10-30 15:09:40 -04:00
Josh Patterson
9027e4e065 update salt-cloud profile after new code copied 2025-10-30 14:48:48 -04:00
Josh Patterson
8ca5276a0e update cloud profile with local and point to new code 2025-10-30 13:59:08 -04:00
Josh Patterson
ee45a5524d Merge remote-tracking branch 'origin/2.4/dev' into salt300616 2025-10-30 13:13:55 -04:00
Josh Patterson
70d4223a75 update salt-cloud config if salt was upgraded 2025-10-30 13:13:16 -04:00
Jorge Reyes
7ab2840381 Merge pull request #15182 from Security-Onion-Solutions/reyesj2-influxdb-metrics
add manager role to elasticsearch ingest time spent
2025-10-30 12:03:58 -05:00
reyesj2
78c951cb70 add manager role to elastic ingest time spent 2025-10-30 11:15:58 -05:00
Josh Patterson
a0a3a80151 Merge remote-tracking branch 'origin/2.4/dev' into salt300616 2025-10-30 11:57:15 -04:00
Josh Patterson
3ecffd5588 Merge pull request #15181 from Security-Onion-Solutions/volumes
create libvirt volumes directory
2025-10-30 11:31:30 -04:00
Josh Patterson
8ea66bb0e9 create libvirt volumes directory 2025-10-30 11:02:36 -04:00
Jorge Reyes
9359fbbad6 Merge pull request #15176 from Security-Onion-Solutions/reyesj2/ilmpolicyhelp 2025-10-29 16:49:07 -05:00
Josh Patterson
1949be90c2 allow to preserve files 2025-10-29 16:49:59 -04:00
Josh Patterson
30970acfaf var for SALTVERSION in cloud config 2025-10-29 16:05:12 -04:00
Josh Patterson
6d12a8bfa1 handle salt-cloud upgrade during soup 2025-10-29 15:31:46 -04:00
reyesj2
2fb41c8d65 elasticsearch retention estimate 2025-10-29 14:24:43 -05:00
reyesj2
835b2609b6 telegraf - increase esindexsize.sh script timeout 2025-10-29 13:45:55 -05:00
Josh Patterson
10ae53f108 upgrade salt 3006.16 2025-10-29 10:23:44 -04:00
Jason Ertel
68bfceb727 Merge pull request #15170 from Security-Onion-Solutions/jertel/wip
bump version
2025-10-24 16:46:24 -04:00
Jason Ertel
f348c7168f bump version 2025-10-24 16:19:24 -04:00
Jason Ertel
627d9bf45d Merge pull request #15169 from Security-Onion-Solutions/jertel/wip
bump version
2025-10-24 16:18:43 -04:00
Jason Ertel
2aee8ab511 bump version 2025-10-24 16:11:50 -04:00
15 changed files with 1307 additions and 30 deletions


@@ -32,6 +32,7 @@ body:
         - 2.4.170
         - 2.4.180
         - 2.4.190
+        - 2.4.200
         - Other (please provide detail below)
     validations:
       required: true


@@ -1 +1 @@
-2.4.190
+2.4.200


@@ -220,12 +220,22 @@ compare_es_versions() {
 }
 
 copy_new_files() {
+    # Define files to exclude from deletion (relative to their respective base directories)
+    local EXCLUDE_FILES=(
+        "salt/hypervisor/soc_hypervisor.yaml"
+    )
+
+    # Build rsync exclude arguments
+    local EXCLUDE_ARGS=()
+    for file in "${EXCLUDE_FILES[@]}"; do
+        EXCLUDE_ARGS+=(--exclude="$file")
+    done
+
     # Copy new files over to the salt dir
     cd $UPDATE_DIR
-    rsync -a salt $DEFAULT_SALT_DIR/ --delete
-    rsync -a pillar $DEFAULT_SALT_DIR/ --delete
+    rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
+    rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
     chown -R socore:socore $DEFAULT_SALT_DIR/
+    chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
     cd /tmp
 }
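A note on the rsync change above: an --exclude pattern also shields matching files on the destination from --delete (unless --delete-excluded is passed), which is how the excluded soc_hypervisor.yaml survives the sync, presumably so local hypervisor config is preserved across soup runs. A minimal sketch with throwaway directories:

    # sketch: --exclude protects a destination file from --delete
    mkdir -p src dst && touch dst/soc_hypervisor.yaml
    rsync -a --delete --exclude='soc_hypervisor.yaml' src/ dst/
    ls dst/   # soc_hypervisor.yaml remains; without the exclude it would be deleted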


@@ -32,6 +32,16 @@ so-elastic-fleet-auto-configure-logstash-outputs:
     - retry:
         attempts: 4
         interval: 30
+
+{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
+so-elastic-fleet-auto-configure-logstash-outputs-force:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
+    - retry:
+        attempts: 4
+        interval: 30
+    - onchanges:
+      - x509: etc_elasticfleet_logstash_crt
 {% endif %}
 
 # If enabled, automatically update Fleet Server URLs & ES Connection
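The onchanges requisite ties this forced run to the x509 state that manages elasticfleet-logstash.crt, so the --force --certs invocation fires only when the certificate is actually reissued. Rendered down to shell, the retry behavior amounts to roughly this sketch:

    # sketch: up to 4 attempts, 30 seconds apart, matching the state's retry options
    for attempt in 1 2 3 4; do
        /usr/sbin/so-elastic-fleet-outputs-update --force --certs && break
        sleep 30
    done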


@@ -8,20 +8,28 @@
 {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
 
 {% if not AGENT_STATUS %}
+pull_agent_installer:
+  file.managed:
+    - name: /opt/so/so-elastic-agent_linux_amd64
+    - source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
+    - mode: 755
+    - makedirs: True
+
 {% if grains.role not in ['so-heavynode'] %}
 run_installer:
-  cmd.script:
-    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
+  cmd.run:
+    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENGENERAL }}
     - cwd: /opt/so
-    - args: -token={{ GRIDNODETOKENGENERAL }}
     - retry: True
 {% else %}
 run_installer:
-  cmd.script:
-    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
+  cmd.run:
+    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENHEAVY }}
     - cwd: /opt/so
-    - args: -token={{ GRIDNODETOKENHEAVY }}
     - retry: True
 {% endif %}
+
+cleanup_agent_installer:
+  file.absent:
+    - name: /opt/so/so-elastic-agent_linux_amd64
 {% endif %}
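This is the refactor referenced by commit 5a8ea57a1b: cmd.script with - args trips https://github.com/saltstack/salt/issues/68298, so the installer is now staged by file.managed, executed by cmd.run, and removed by file.absent. On a minion the sequence is roughly this sketch (the token comes from the GRIDNODETOKENGENERAL/GRIDNODETOKENHEAVY variables in the real state):

    cd /opt/so
    ./so-elastic-agent_linux_amd64 -token="$GRIDNODETOKEN"   # binary staged by pull_agent_installer
    rm -f /opt/so/so-elastic-agent_linux_amd64               # cleanup_agent_installer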


@@ -8,6 +8,27 @@
 . /usr/sbin/so-common
 
+FORCE_UPDATE=false
+UPDATE_CERTS=false
+
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    -f|--force)
+      FORCE_UPDATE=true
+      shift
+      ;;
+    -c|--certs)
+      UPDATE_CERTS=true
+      shift
+      ;;
+    *)
+      echo "Unknown option $1"
+      echo "Usage: $0 [-f|--force] [-c|--certs]"
+      exit 1
+      ;;
+  esac
+done
+
 # Only run on Managers
 if ! is_manager_node; then
   printf "Not a Manager Node... Exiting"
@@ -17,17 +38,42 @@ fi
 
 function update_logstash_outputs() {
   if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
     SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
+    LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
+    LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
+    LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
     if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
-      JSON_STRING=$(jq -n \
-        --arg UPDATEDLIST "$NEW_LIST_JSON" \
-        --argjson SECRETS "$SECRETS" \
-        --argjson SSL_CONFIG "$SSL_CONFIG" \
-        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+      if [[ "$UPDATE_CERTS" != "true" ]]; then
+        # Reuse existing secret
+        JSON_STRING=$(jq -n \
+          --arg UPDATEDLIST "$NEW_LIST_JSON" \
+          --argjson SECRETS "$SECRETS" \
+          --argjson SSL_CONFIG "$SSL_CONFIG" \
+          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+      else
+        # Update certs, creating new secret
+        JSON_STRING=$(jq -n \
+          --arg UPDATEDLIST "$NEW_LIST_JSON" \
+          --arg LOGSTASHKEY "$LOGSTASHKEY" \
+          --arg LOGSTASHCRT "$LOGSTASHCRT" \
+          --arg LOGSTASHCA "$LOGSTASHCA" \
+          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
+      fi
     else
-      JSON_STRING=$(jq -n \
-        --arg UPDATEDLIST "$NEW_LIST_JSON" \
-        --argjson SSL_CONFIG "$SSL_CONFIG" \
-        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
+      if [[ "$UPDATE_CERTS" != "true" ]]; then
+        # Reuse existing ssl config
+        JSON_STRING=$(jq -n \
+          --arg UPDATEDLIST "$NEW_LIST_JSON" \
+          --argjson SSL_CONFIG "$SSL_CONFIG" \
+          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
+      else
+        # Update ssl config
+        JSON_STRING=$(jq -n \
+          --arg UPDATEDLIST "$NEW_LIST_JSON" \
+          --arg LOGSTASHKEY "$LOGSTASHKEY" \
+          --arg LOGSTASHCRT "$LOGSTASHCRT" \
+          --arg LOGSTASHCA "$LOGSTASHCA" \
+          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
+      fi
     fi
   fi
@@ -151,7 +197,7 @@ NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "$
 NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
 
 # Compare the current & new list of outputs - if different, update the Logstash outputs
-if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
+if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
   printf "\nHashes match - no update needed.\n"
   printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@@ -31,6 +31,19 @@ libvirt_conf_dir:
     - group: 939
     - makedirs: True
 
+libvirt_volumes:
+  file.directory:
+    - name: /nsm/libvirt/volumes
+    - user: qemu
+    - group: qemu
+    - dir_mode: 755
+    - file_mode: 640
+    - recurse:
+      - user
+      - group
+      - mode
+    - makedirs: True
+
 libvirt_config:
   file.managed:
     - name: /opt/so/conf/libvirt/libvirtd.conf
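One way to confirm what this state enforces on a hypervisor after a highstate (a quick sketch, not part of the diff):

    stat -c '%U:%G %a' /nsm/libvirt/volumes                        # expect qemu:qemu 755
    find /nsm/libvirt/volumes -type f -exec stat -c '%a %n' {} +   # recursed files should be 640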


@@ -21,6 +21,8 @@ whiptail_title='Security Onion UPdater'
 NOTIFYCUSTOMELASTICCONFIG=false
 TOPFILE=/opt/so/saltstack/default/salt/top.sls
 BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
+SALTUPGRADED=false
+SALT_CLOUD_INSTALLED=false
 
 # used to display messages to the user at the end of soup
 declare -a FINAL_MESSAGE_QUEUE=()
@@ -1260,24 +1262,39 @@ upgrade_check_salt() {
 }
 
 upgrade_salt() {
-    SALTUPGRADED=True
     echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
     echo ""
     # If rhel family
     if [[ $is_rpm ]]; then
+        # Check if salt-cloud is installed
+        if rpm -q salt-cloud &>/dev/null; then
+            SALT_CLOUD_INSTALLED=true
+        fi
+
         echo "Removing yum versionlock for Salt."
         echo ""
         yum versionlock delete "salt"
         yum versionlock delete "salt-minion"
         yum versionlock delete "salt-master"
+        # Remove salt-cloud versionlock if installed
+        if [[ $SALT_CLOUD_INSTALLED == true ]]; then
+            yum versionlock delete "salt-cloud"
+        fi
         echo "Updating Salt packages."
         echo ""
         set +e
         # if oracle run with -r to ignore repos set by bootstrap
         if [[ $OS == 'oracle' ]]; then
-            run_check_net_err \
-            "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
-            "Could not update salt, please check $SOUP_LOG for details."
+            # Add -L flag only if salt-cloud is already installed
+            if [[ $SALT_CLOUD_INSTALLED == true ]]; then
+                run_check_net_err \
+                "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
+                "Could not update salt, please check $SOUP_LOG for details."
+            else
+                run_check_net_err \
+                "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
+                "Could not update salt, please check $SOUP_LOG for details."
+            fi
         # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
         else
             run_check_net_err \
@@ -1290,6 +1307,10 @@ upgrade_salt() {
         yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
         yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
         yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
+        # Add salt-cloud versionlock if installed
+        if [[ $SALT_CLOUD_INSTALLED == true ]]; then
+            yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
+        fi
     # Else do Ubuntu things
     elif [[ $is_deb ]]; then
         echo "Removing apt hold for Salt."
@@ -1322,6 +1343,7 @@ upgrade_salt() {
         echo ""
         exit 1
     else
+        SALTUPGRADED=true
         echo "Salt upgrade success."
         echo ""
     fi
@@ -1565,6 +1587,11 @@ main() {
     # ensure the mine is updated and populated before highstates run, following the salt-master restart
     update_salt_mine
 
+    if [[ $SALT_CLOUD_INSTALLED == true && $SALTUPGRADED == true ]]; then
+        echo "Updating salt-cloud config to use the new Salt version"
+        salt-call state.apply salt.cloud.config concurrent=True
+    fi
+
     enable_highstate
 
     echo ""


@@ -14,7 +14,7 @@ sool9_{{host}}:
   private_key: /etc/ssh/auth_keys/soqemussh/id_ecdsa
   sudo: True
   deploy_command: sh /tmp/.saltcloud-*/deploy.sh
-  script_args: -r -F -x python3 stable 3006.9
+  script_args: -r -F -x python3 stable {{ SALTVERSION }}
   minion:
     master: {{ grains.host }}
     master_port: 4506


@@ -13,6 +13,7 @@
 {% if '.'.join(sls.split('.')[:2]) in allowed_states %}
 {% if 'vrt' in salt['pillar.get']('features', []) %}
 {% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
+{% from 'salt/map.jinja' import SALTVERSION %}
 
 {% if HYPERVISORS %}
 cloud_providers:
@@ -20,7 +21,7 @@ cloud_providers:
     - name: /etc/salt/cloud.providers.d/libvirt.conf
     - source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
     - defaults:
-        HYPERVISORS: {{HYPERVISORS}}
+        HYPERVISORS: {{ HYPERVISORS }}
     - template: jinja
     - makedirs: True
@@ -29,9 +30,10 @@ cloud_profiles:
     - name: /etc/salt/cloud.profiles.d/socloud.conf
     - source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
     - defaults:
-        HYPERVISORS: {{HYPERVISORS}}
+        HYPERVISORS: {{ HYPERVISORS }}
         MANAGERHOSTNAME: {{ grains.host }}
         MANAGERIP: {{ pillar.host.mainip }}
+        SALTVERSION: {{ SALTVERSION }}
     - template: jinja
     - makedirs: True
 {% endif %}
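With SALTVERSION imported from salt/map.jinja and handed to the profile template, the rendered script_args now track the grid's pinned Salt version instead of the hard-coded 3006.9. A quick render check on the manager (sketch):

    # after a highstate, the profile should carry the pinned version, e.g. "stable 3006.16"
    grep script_args /etc/salt/cloud.profiles.d/socloud.conf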


@@ -1,4 +1,4 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   master:
-    version: '3006.9'
+    version: '3006.16'


@@ -1,5 +1,5 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   minion:
-    version: '3006.9'
+    version: '3006.16'
     check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
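The header comment is load-bearing: soup finds the pinned version by pattern-matching this pillar, so 'version' must stay unique within the file. A hedged illustration of that kind of lookup (the exact command and path soup uses are not shown in this diff):

    # hypothetical: extract the pinned minion version from the pillar file
    grep -m1 "version:" pillar/salt/minion.sls | awk -F"'" '{print $2}'   # -> 3006.16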


@@ -337,4 +337,5 @@
   ]
   data_format = "influx"
   interval = "1h"
+  timeout = "120s"
 {%- endif %}