Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Compare commits: reyesj2-pa ... 2.4.190-20 (1 commit)
| Author | SHA1 | Date |
|---|---|---|
| | 33ada95bbc | |
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored)
@@ -32,7 +32,6 @@ body:
        - 2.4.170
        - 2.4.180
        - 2.4.190
        - 2.4.200
        - Other (please provide detail below)
    validations:
      required: true
@@ -220,22 +220,12 @@ compare_es_versions() {
}

copy_new_files() {
  # Define files to exclude from deletion (relative to their respective base directories)
  local EXCLUDE_FILES=(
    "salt/hypervisor/soc_hypervisor.yaml"
  )

  # Build rsync exclude arguments
  local EXCLUDE_ARGS=()
  for file in "${EXCLUDE_FILES[@]}"; do
    EXCLUDE_ARGS+=(--exclude="$file")
  done

  # Copy new files over to the salt dir
  cd $UPDATE_DIR
  rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
  rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
  rsync -a salt $DEFAULT_SALT_DIR/ --delete
  rsync -a pillar $DEFAULT_SALT_DIR/ --delete
  chown -R socore:socore $DEFAULT_SALT_DIR/
  chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
  cd /tmp
}
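
The point of the exclude handling above is that `rsync --delete` would otherwise remove any receiver-side file that no longer exists in the source tree, and by default rsync protects excluded paths from that deletion. A minimal, self-contained sketch of that behavior, using hypothetical demo directories rather than the soup paths:

    #!/bin/bash
    # Source tree (what the update ships) and destination tree (what is already installed).
    mkdir -p /tmp/demo/src/salt/hypervisor /tmp/demo/dst/salt/hypervisor
    echo "from update"  > /tmp/demo/src/salt/hypervisor/defaults.yaml
    echo "local config" > /tmp/demo/dst/salt/hypervisor/soc_hypervisor.yaml

    # With --delete alone, soc_hypervisor.yaml would be removed because it is not in the source.
    # Adding --exclude for that relative path protects it on the receiving side.
    rsync -a --delete --exclude="salt/hypervisor/soc_hypervisor.yaml" /tmp/demo/src/salt /tmp/demo/dst/

    ls /tmp/demo/dst/salt/hypervisor/   # defaults.yaml and soc_hypervisor.yaml both present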
@@ -32,16 +32,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
    - retry:
        attempts: 4
        interval: 30

{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
    - retry:
        attempts: 4
        interval: 30
    - onchanges:
      - x509: etc_elasticfleet_logstash_crt
{% endif %}

# If enabled, automatically update Fleet Server URLs & ES Connection
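
The `-force` state runs the same script as the state above it, but only when the Logstash certificate is reissued, courtesy of the `onchanges` requisite. A rough shell analogue of that trigger, to illustrate the intent only (the stamp file is hypothetical; the real change detection is done by Salt, not a script):

    #!/bin/bash
    CRT=/etc/pki/elasticfleet-logstash.crt      # path taken from the state above
    STAMP=/tmp/elasticfleet-logstash.crt.sha1   # hypothetical bookkeeping file

    new_hash=$(sha1sum "$CRT" | awk '{print $1}')
    old_hash=$(cat "$STAMP" 2>/dev/null)

    # Only force a fleet output update when the certificate content actually changed.
    if [[ "$new_hash" != "$old_hash" ]]; then
      /usr/sbin/so-elastic-fleet-outputs-update --force --certs && echo "$new_hash" > "$STAMP"
    fi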
@@ -8,28 +8,20 @@
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% if not AGENT_STATUS %}

pull_agent_installer:
  file.managed:
    - name: /opt/so/so-elastic-agent_linux_amd64
    - source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - mode: 755
    - makedirs: True

{% if grains.role not in ['so-heavynode'] %}
run_installer:
  cmd.run:
    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENGENERAL }}
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENGENERAL }}
    - retry: True
{% else %}
run_installer:
  cmd.run:
    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENHEAVY }}
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENHEAVY }}
    - retry: True
{% endif %}

cleanup_agent_installer:
  file.absent:
    - name: /opt/so/so-elastic-agent_linux_amd64
{% endif %}
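
The move from `cmd.run` against a pre-staged binary to `cmd.script` lets Salt fetch the installer from the fileserver, run it, and clean up itself. A hedged shell approximation of what that sequence amounts to on the minion; the source path is a placeholder and the caching/cleanup is really handled by Salt:

    #!/bin/bash
    # Approximation only. TOKEN stands in for GRIDNODETOKENGENERAL / GRIDNODETOKENHEAVY.
    TOKEN="REDACTED-ENROLLMENT-TOKEN"
    SRC=/path/on/the/salt/fileserver/so-elastic-agent_linux_amd64   # placeholder for the salt:// source above
    TMP_INSTALLER=$(mktemp /tmp/so-elastic-agent_linux_amd64.XXXXXX)

    cp "$SRC" "$TMP_INSTALLER"          # cmd.script caches the salt:// file for us
    chmod 755 "$TMP_INSTALLER"          # and makes it executable
    (cd /opt/so && "$TMP_INSTALLER" -token="$TOKEN")   # cwd and args from the state above
    rm -f "$TMP_INSTALLER"              # no separate cleanup_agent_installer state needed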
@@ -8,27 +8,6 @@

. /usr/sbin/so-common

FORCE_UPDATE=false
UPDATE_CERTS=false

while [[ $# -gt 0 ]]; do
  case $1 in
    -f|--force)
      FORCE_UPDATE=true
      shift
      ;;
    -c|--certs)
      UPDATE_CERTS=true
      shift
      ;;
    *)
      echo "Unknown option $1"
      echo "Usage: $0 [-f|--force] [-c|--certs]"
      exit 1
      ;;
  esac
done

# Only run on Managers
if ! is_manager_node; then
  printf "Not a Manager Node... Exiting"
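
Given that parser, a few illustrative invocations; the described behavior is inferred from the case statement above and the hash check later in this diff:

    # No flags: compare host-list hashes only, reuse the existing SSL config/secrets.
    /usr/sbin/so-elastic-fleet-outputs-update

    # Force a policy update even when the host-list hash is unchanged.
    /usr/sbin/so-elastic-fleet-outputs-update --force

    # Force an update and push freshly read certificate material.
    /usr/sbin/so-elastic-fleet-outputs-update -f -c

    # Anything else prints the usage line and exits 1.
    /usr/sbin/so-elastic-fleet-outputs-update --bogus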
@@ -38,42 +17,17 @@ fi
function update_logstash_outputs() {
  if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
    LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
    LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
    LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SECRETS "$SECRETS" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      else
        # Update certs, creating new secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
      fi
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SECRETS "$SECRETS" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
    else
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
      else
        # Update ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
      fi
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
    fi
  fi
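
The jq invocations above mix `--arg` (binds a shell value as a JSON string) with `--argjson` (binds it as parsed JSON), which is why the existing SSL config and secrets objects are passed with `--argjson` while the raw PEM material is passed with `--arg`. A small standalone demonstration of the difference, with a made-up sample value:

    #!/bin/bash
    SSL_CONFIG='{"certificate":"-----BEGIN CERTIFICATE-----..."}'

    # --argjson injects the value as real JSON, so .ssl becomes an object:
    jq -n --argjson SSL_CONFIG "$SSL_CONFIG" '{"ssl": $SSL_CONFIG}'
    # {"ssl": {"certificate": "-----BEGIN CERTIFICATE-----..."}}

    # --arg injects the same bytes as a single string, so .ssl becomes quoted text:
    jq -n --arg SSL_CONFIG "$SSL_CONFIG" '{"ssl": $SSL_CONFIG}'
    # {"ssl": "{\"certificate\":\"-----BEGIN CERTIFICATE-----...\"}"}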
@@ -197,7 +151,7 @@ NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "$
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

# Compare the current & new list of outputs - if different, update the Logstash outputs
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
  printf "\nHashes match - no update needed.\n"
  printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
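
A compact sketch of that gate: the update is skipped only when the hashes match and no force flag was given. The list values below are invented for illustration:

    #!/bin/bash
    FORCE_UPDATE=false
    CURRENT_LIST='["manager:5055"]'
    NEW_LIST_JSON='["manager:5055","sensor1:5055"]'

    CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
    NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

    if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
      printf "\nHashes match - no update needed.\n"
    else
      printf "\nList changed or --force given - updating Logstash outputs.\n"
    fi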
(File diff suppressed because it is too large.)
(File diff suppressed because one or more lines are too long.)
@@ -31,19 +31,6 @@ libvirt_conf_dir:
    - group: 939
    - makedirs: True

libvirt_volumes:
  file.directory:
    - name: /nsm/libvirt/volumes
    - user: qemu
    - group: qemu
    - dir_mode: 755
    - file_mode: 640
    - recurse:
      - user
      - group
      - mode
    - makedirs: True

libvirt_config:
  file.managed:
    - name: /opt/so/conf/libvirt/libvirtd.conf
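
For readers less used to Salt's file.directory state, this is roughly the shell equivalent of what libvirt_volumes enforces on each run; an approximation of the end result, not of how Salt applies it:

    #!/bin/bash
    # Create the volume directory, set ownership recursively, and apply the
    # directory/file mode split that dir_mode/file_mode plus recurse express above.
    mkdir -p /nsm/libvirt/volumes
    chown -R qemu:qemu /nsm/libvirt/volumes
    find /nsm/libvirt/volumes -type d -exec chmod 755 {} +
    find /nsm/libvirt/volumes -type f -exec chmod 640 {} +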
@@ -21,8 +21,6 @@ whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
@@ -1262,39 +1260,24 @@ upgrade_check_salt() {
}

upgrade_salt() {
  SALTUPGRADED=True
  echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
  echo ""
  # If rhel family
  if [[ $is_rpm ]]; then
    # Check if salt-cloud is installed
    if rpm -q salt-cloud &>/dev/null; then
      SALT_CLOUD_INSTALLED=true
    fi

    echo "Removing yum versionlock for Salt."
    echo ""
    yum versionlock delete "salt"
    yum versionlock delete "salt-minion"
    yum versionlock delete "salt-master"
    # Remove salt-cloud versionlock if installed
    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
      yum versionlock delete "salt-cloud"
    fi
    echo "Updating Salt packages."
    echo ""
    set +e
    # if oracle run with -r to ignore repos set by bootstrap
    if [[ $OS == 'oracle' ]]; then
      # Add -L flag only if salt-cloud is already installed
      if [[ $SALT_CLOUD_INSTALLED == true ]]; then
        run_check_net_err \
          "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
          "Could not update salt, please check $SOUP_LOG for details."
      else
        run_check_net_err \
          "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
          "Could not update salt, please check $SOUP_LOG for details."
      fi
      run_check_net_err \
        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
        "Could not update salt, please check $SOUP_LOG for details."
    # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
    else
      run_check_net_err \
@@ -1307,10 +1290,6 @@ upgrade_salt() {
    yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
    # Add salt-cloud versionlock if installed
    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
      yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
    fi
  # Else do Ubuntu things
  elif [[ $is_deb ]]; then
    echo "Removing apt hold for Salt."
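
The versionlock handling around the upgrade follows a common pattern: unlock, upgrade, re-lock at the new version. A generic sketch of that cycle for a single package, assuming the yum versionlock plugin is installed; the package name and version below are placeholders, not Security Onion pins:

    #!/bin/bash
    PKG="salt-minion"
    NEWVERSION="3006.x"   # placeholder

    # 1. Drop the existing lock so yum is allowed to move the package.
    yum versionlock delete "$PKG"

    # 2. Upgrade (soup delegates this step to bootstrap-salt.sh).
    yum install -y "$PKG"

    # 3. Re-lock so routine 'yum update' runs cannot drift the version afterwards.
    yum versionlock add "${PKG}-0:${NEWVERSION}-0.*"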
@@ -1343,7 +1322,6 @@ upgrade_salt() {
    echo ""
    exit 1
  else
    SALTUPGRADED=true
    echo "Salt upgrade success."
    echo ""
  fi
@@ -1587,11 +1565,6 @@ main() {
  # ensure the mine is updated and populated before highstates run, following the salt-master restart
  update_salt_mine

  if [[ $SALT_CLOUD_INSTALLED == true && $SALTUPGRADED == true ]]; then
    echo "Updating salt-cloud config to use the new Salt version"
    salt-call state.apply salt.cloud.config concurrent=True
  fi

  enable_highstate

  echo ""
@@ -14,7 +14,7 @@ sool9_{{host}}:
  private_key: /etc/ssh/auth_keys/soqemussh/id_ecdsa
  sudo: True
  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
  script_args: -r -F -x python3 stable {{ SALTVERSION }}
  script_args: -r -F -x python3 stable 3006.9
  minion:
    master: {{ grains.host }}
    master_port: 4506
@@ -13,7 +13,6 @@
{% if '.'.join(sls.split('.')[:2]) in allowed_states %}
{% if 'vrt' in salt['pillar.get']('features', []) %}
{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
{% from 'salt/map.jinja' import SALTVERSION %}

{% if HYPERVISORS %}
cloud_providers:

@@ -21,7 +20,7 @@ cloud_providers:
    - name: /etc/salt/cloud.providers.d/libvirt.conf
    - source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
    - defaults:
        HYPERVISORS: {{ HYPERVISORS }}
        HYPERVISORS: {{HYPERVISORS}}
    - template: jinja
    - makedirs: True

@@ -30,10 +29,9 @@ cloud_profiles:
    - name: /etc/salt/cloud.profiles.d/socloud.conf
    - source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
    - defaults:
        HYPERVISORS: {{ HYPERVISORS }}
        HYPERVISORS: {{HYPERVISORS}}
        MANAGERHOSTNAME: {{ grains.host }}
        MANAGERIP: {{ pillar.host.mainip }}
        SALTVERSION: {{ SALTVERSION }}
    - template: jinja
    - makedirs: True
{% endif %}
@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  master:
    version: '3006.16'
    version: '3006.9'

@@ -1,5 +1,5 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  minion:
    version: '3006.16'
    version: '3006.9'
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
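
The comment warns that soup greps this pillar for the pinned version, which is why `version` must not appear on any other line of the file. A sketch of the kind of extraction that assumption supports; the exact command and file path used by soup are not shown in this diff, so both are hypothetical here:

    #!/bin/bash
    PILLAR=/path/to/salt/minion/pillar.sls   # placeholder; the real path is not shown in this diff
    grep "version:" "$PILLAR" | awk -F"'" '{print $2}'
    # -> 3006.16 (or 3006.9 on the other side of this diff)
    # A second 'version:' line anywhere in the file would break the single-match assumption.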
@@ -337,5 +337,4 @@
  ]
  data_format = "influx"
  interval = "1h"
  timeout = "120s"
{%- endif %}