Compare commits

..

26 Commits

Author SHA1 Message Date
Mike Reeves ee36db4dd7 Merge pull request #15817 from Security-Onion-Solutions/feature/postgres
soup: drop --local from postgres.telegraf_users reconcile
2026-04-23 11:28:24 -04:00
Mike Reeves b7faa0e437 Merge pull request #15816 from Security-Onion-Solutions/feature/postgres
soup: bootstrap postgres pillar stubs and secret on 3.0.0 upgrade
2026-04-23 10:13:57 -04:00
Mike Reeves fad953b2b3 Merge pull request #15812 from Security-Onion-Solutions/feature/postgres
so-telegraf-cred: make executable and harden error handling
2026-04-22 14:31:58 -04:00
Mike Reeves 2c341e5160 Merge pull request #15810 from Security-Onion-Solutions/feature/postgres
Split postgres auth pillar from per-minion telegraf creds
2026-04-22 11:13:55 -04:00
Mike Reeves 8425ac4100 Merge pull request #15808 from Security-Onion-Solutions/feature/postgres
Feature/postgres
2026-04-21 15:48:08 -04:00
Mike Reeves 922fc60466 Merge pull request #15804 from Security-Onion-Solutions/feature/postgres-integration
postgres integration: sync feature/postgres to bravo for automated testing
2026-04-21 11:14:56 -04:00
Mike Reeves da69f0f1a4 Merge pull request #15793 from Security-Onion-Solutions/feature/postgres
Harden postgres secrets, TLS enforcement, and admin tooling
2026-04-20 12:38:29 -04:00
Mike Reeves 29b24fa263 Merge pull request #15788 from Security-Onion-Solutions/feature/postgres
Wait for TCP-ready postgres, not the init-phase Unix socket
2026-04-17 16:46:59 -04:00
Mike Reeves 981d8bb805 Merge pull request #15787 from Security-Onion-Solutions/feature/postgres
Fix Telegraf postgres template syntax, partman privileges, and idempotency
2026-04-17 15:47:35 -04:00
Mike Reeves 4e3dbd800c Merge pull request #15785 from Security-Onion-Solutions/feature/postgres
Fix Telegraf→Postgres table creation and state.apply race
2026-04-17 13:03:26 -04:00
Mike Reeves dc998191d9 Merge pull request #15784 from Security-Onion-Solutions/feature/postgres
Create so_telegraf DB from Salt and pin pg_partman schema
2026-04-17 10:55:00 -04:00
Mike Reeves 9cce920d78 Merge pull request #15781 from Security-Onion-Solutions/feature/postgres
Telegraf Postgres: shared schema + JSONB storage
2026-04-16 17:29:29 -04:00
Mike Reeves a5e5f12889 Merge pull request #15779 from Security-Onion-Solutions/feature/postgres
so-log-check: exclude psql ON_ERROR_STOP flag
2026-04-15 19:47:44 -04:00
Mike Reeves 999f3f5b15 Merge pull request #15778 from Security-Onion-Solutions/feature/postgres
Fix so-show-stats tag column resolution
2026-04-15 19:32:55 -04:00
Mike Reeves 6f9da893ac Merge pull request #15777 from Security-Onion-Solutions/feature/postgres
Postgres integration: SOC module config + Telegraf dual-write backend
2026-04-15 16:22:27 -04:00
Mike Reeves 0d3e2a0708 Merge pull request #15759 from Security-Onion-Solutions/feature/postgres
Add ES credentials to postgres SOC module config
2026-04-10 11:44:20 -04:00
Mike Reeves e339aa41d5 Merge pull request #15757 from Security-Onion-Solutions/feature/postgres
Add postgres admin password to SOC config
2026-04-09 22:24:23 -04:00
Mike Reeves 01a24b3684 Merge pull request #15756 from Security-Onion-Solutions/feature/postgres
Fix init-users.sh password escaping for special characters
2026-04-09 22:00:09 -04:00
Mike Reeves f1cdd265f9 Merge pull request #15755 from Security-Onion-Solutions/feature/postgres
Only load postgres module on manager nodes
2026-04-09 21:10:57 -04:00
Mike Reeves 631f5bd754 Merge pull request #15753 from Security-Onion-Solutions/feature/postgres
Use manager IP for postgres host in SOC config
2026-04-09 19:45:33 -04:00
Mike Reeves fb4615d5cd Merge pull request #15750 from Security-Onion-Solutions/feature/postgres
Wire postgres credentials into SOC module config
2026-04-09 14:55:51 -04:00
Mike Reeves 6eaf22fc5a Merge pull request #15748 from Security-Onion-Solutions/feature/postgres
Add postgres.auth to allowed_states
2026-04-09 12:47:00 -04:00
Mike Reeves 592a6a4c21 Merge pull request #15747 from Security-Onion-Solutions/feature/postgres
Enable postgres by default for manager nodes
2026-04-09 12:24:37 -04:00
Mike Reeves 409d4fb632 Merge pull request #15746 from Security-Onion-Solutions/feature/postgres
Add daily PostgreSQL database backup
2026-04-09 10:44:47 -04:00
Mike Reeves 9d72149fcd Merge pull request #15743 from Security-Onion-Solutions/feature/postgres
Add so-postgres container and Salt infrastructure
2026-04-09 10:05:15 -04:00
Mike Reeves e6afecbaa9 Change version from 3.1.0 to 3.0.0-bravo
2026-04-09 09:47:53 -04:00
34 changed files with 260 additions and 748 deletions
+1 -1
@@ -1 +1 @@
3.1.0
3.0.0-bravo
+1 -3
@@ -35,8 +35,6 @@
'kratos',
'hydra',
'elasticfleet',
'elasticfleet.manager',
'elasticsearch.cluster',
'elastic-fleet-package-registry',
'utility'
] %}
@@ -81,7 +79,7 @@
),
'so-heavynode': (
sensor_states +
['elasticagent', 'elasticsearch', 'elasticsearch.cluster', 'logstash', 'redis', 'nginx']
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
),
'so-idh': (
['idh']
+4 -23
@@ -164,8 +164,8 @@ update_docker_containers() {
# Pull down the trusted docker image
run_check_net_err \
"docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
# Get signature
run_check_net_err \
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' $sig_url --output $SIGNPATH/$image.sig" \
@@ -188,27 +188,8 @@ update_docker_containers() {
if [ -z "$HOSTNAME" ]; then
HOSTNAME=$(hostname)
fi
docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
echo "Unable to tag $image" >> "$LOG_FILE" 2>&1
exit 1
}
# Push to the embedded registry via a registry-to-registry copy. Avoids
# `docker push`, which on Docker 29.x with the containerd image store
# represents freshly-pulled images as an index whose layer content
# isn't reachable through the push path. The local `docker tag` above
# is preserved so so-image-pull's `:5000` existence check still works.
# Pin to the digest already gpg-verified above so we copy exactly the
# bytes we approved.
local VERIFIED_REF
VERIFIED_REF=$(echo "$DOCKERINSPECT" | jq -r ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" | head -n 1)
if [ -z "$VERIFIED_REF" ] || [ "$VERIFIED_REF" = "null" ]; then
echo "Unable to determine verified digest for $image" >> "$LOG_FILE" 2>&1
exit 1
fi
docker buildx imagetools create --tag $HOSTNAME:5000/$IMAGEREPO/$image "$VERIFIED_REF" >> "$LOG_FILE" 2>&1 || {
echo "Unable to copy $image to embedded registry" >> "$LOG_FILE" 2>&1
exit 1
}
docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
fi
else
echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1
+1 -1
@@ -227,7 +227,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk|cyera|island_browser).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. This error should not be seen on fresh ES 9.3.3 installs or after SO 3.1.0 with soups addition of check_transform_health_and_reauthorize()
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
fi
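Note on the transform exclusion above: the \1 backreference makes the rule fire only when the integration name inside the TransformTask id matches the one in the denied index pattern. Illustrated against a fabricated log line (grep -P, since backreferences need PCRE to be portable):

    echo '[TransformTask] [logs-tychon.f] user so_kibana lacks the required permissions [logs-tychon' \
      | grep -P 'TransformTask\] \[logs-(tychon|aws_billing).*lacks the required permissions \[(logs|metrics)-\1'
    # matches; change the trailing dataset to logs-snyk and it no longer does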
+2 -9
@@ -9,7 +9,7 @@
. /usr/sbin/so-common
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02" "HVGUEST")
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
{%- if salt['grains.get']('sosmodel', '') %}
@@ -87,11 +87,6 @@ check_boss_raid() {
}
check_software_raid() {
if [[ ! -f /proc/mdstat ]]; then
SWRAID=0
return
fi
SWRC=$(grep "_" /proc/mdstat)
if [[ -n $SWRC ]]; then
# RAID is failed in some way
@@ -112,9 +107,7 @@ if [[ "$is_hwraid" == "true" ]]; then
fi
if [[ "$is_softwareraid" == "true" ]]; then
check_software_raid
if [ "$model" != "HVGUEST" ]; then
check_boss_raid
fi
check_boss_raid
fi
sum=$(($SWRAID + $BOSSRAID + $HWRAID))
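The software-RAID health test in this hunk reduces to one grep: an underscore inside mdstat's status brackets (e.g. [U_]) marks a failed member. Standalone:

    if [[ -f /proc/mdstat ]] && grep -q "_" /proc/mdstat; then
      echo "software RAID degraded"
    else
      echo "software RAID healthy or not present"
    fi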
@@ -51,16 +51,6 @@ so-elastic-fleet-package-registry:
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
wait_for_so-elastic-fleet-package-registry:
http.wait_for_successful_query:
- name: "http://localhost:8080/health"
- status: 200
- wait_for: 300
- request_interval: 15
- require:
- docker_container: so-elastic-fleet-package-registry
delete_so-elastic-fleet-package-registry_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
+103 -4
@@ -17,17 +17,65 @@ include:
- logstash.ssl
- elasticfleet.config
- elasticfleet.sostatus
{%- if GLOBALS.role != "so-fleet" %}
- elasticfleet.manager
{%- endif %}
{% if GLOBALS.role != "so-fleet" %}
{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
{% endif %}
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry:
attempts: 4
interval: 30
{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update --certs
- retry:
attempts: 4
interval: 30
- onchanges:
- x509: etc_elasticfleet_logstash_crt
- x509: elasticfleet_kafka_crt
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry:
attempts: 4
interval: 30
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
{% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry:
attempts: 4
interval: 30
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats
@@ -101,6 +149,57 @@ so-elastic-fleet:
- x509: etc_elasticfleet_crt
{% endif %}
{% if GLOBALS.role != "so-fleet" %}
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry:
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% endif %}
delete_so-elastic-fleet_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
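The statefile pattern above (render the desired package list into elastic_fleet_packages.txt, gate the upgrade on onchanges) keeps so-elastic-fleet-package-upgrade from re-running on every highstate. Imperative equivalent, with a hypothetical package list standing in for ELASTICFLEETMERGED.packages:

    state=/opt/so/state/elastic_fleet_packages.txt
    desired="endpoint system windows"   # hypothetical; Salt renders the real list
    if [[ ! -f "$state" || "$(cat "$state")" != "$desired" ]]; then
      /usr/sbin/so-elastic-fleet-package-upgrade && echo "$desired" > "$state"
    fi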
-101
@@ -1,101 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
include:
- elasticfleet.config
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry:
attempts: 4
interval: 30
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry:
attempts: 4
interval: 30
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry:
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
@@ -240,7 +240,7 @@ elastic_fleet_policy_create() {
--arg DESC "$DESC" \
--arg TIMEOUT $TIMEOUT \
--arg FLEETSERVER "$FLEETSERVER" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER,"advanced_settings":{"agent_logging_level": "warning"}}'
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
)
# Create Fleet Policy
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
@@ -5,12 +5,11 @@
# this file except in compliance with the Elastic License 2.0.
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common
{%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{# Optionally override Elasticsearch version for Elastic Agent patch releases #}
{%- if ELASTICFLEETDEFAULTS.elasticfleet.patch_version is defined %}
{%- do ELASTICSEARCHDEFAULTS.elasticsearch.update({'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}) %}
{%- do ELASTICSEARCHDEFAULTS.update({'elasticsearch': {'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}}) %}
{%- endif %}
# Only run on Managers
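The two patch_version overrides above are not equivalent: updating the nested version key preserves sibling settings under elasticsearch, while assigning a whole new dict drops them. Same distinction in jq, with a hypothetical sibling key:

    doc='{"elasticsearch":{"version":"9.3.3","index_clean":true}}'
    jq '.elasticsearch.version = "9.3.4"' <<< "$doc"       # keeps index_clean
    jq '.elasticsearch = {"version":"9.3.4"}' <<< "$doc"   # drops index_clean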
@@ -20,10 +19,13 @@ if ! is_manager_node; then
fi
# Get current list of Grid Node Agents that need to be upgraded
if ! RAW_JSON=$(fleet_api "agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version | urlencode }}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)
printf "Failed to query for current Grid Agents...\n"
exit 1
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
if [ "$CHECKSUM" -ne 1 ]; then
printf "Failed to query for current Grid Agents...\n"
exit 1
fi
# Generate list of Node Agents that need updates
@@ -34,12 +36,10 @@ if [ "$OUTDATED_LIST" != '[]' ]; then
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"
# Generate updated JSON payload
JSON_STRING=$(jq -n --arg ELASTICVERSION "{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}" --argjson UPDATELIST "$OUTDATED_LIST" '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
# Update Node Agents
if ! fleet_api "agents/bulk_upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
printf "Failed to initiate Agent upgrades...\n"
fi
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
else
printf "No Agents need updates... Exiting\n\n"
exit 0
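The substantive difference between the two payload builders above is --arg versus --argjson: --arg always binds a string, while --argjson parses its value as JSON, so the agents field becomes a real array instead of a quoted blob.

    jq -cn --arg     agents '["a1","a2"]' '{agents: $agents}'   # {"agents":"[\"a1\",\"a2\"]"}
    jq -cn --argjson agents '["a1","a2"]' '{agents: $agents}'   # {"agents":["a1","a2"]}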
@@ -235,16 +235,6 @@ function update_kafka_outputs() {
{% endif %}
# Compare the current Elastic Fleet certificate against what is on disk
POLICY_CERT_SHA=$(jq -r '.item.ssl.certificate' <<< $RAW_JSON | openssl x509 -noout -sha256 -fingerprint)
DISK_CERT_SHA=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint)
if [[ "$POLICY_CERT_SHA" != "$DISK_CERT_SHA" ]]; then
printf "Certificate on disk doesn't match certificate in policy - forcing update\n"
UPDATE_CERTS=true
FORCE_UPDATE=true
fi
# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
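The drift test above reduces to comparing SHA-256 fingerprints of two PEMs; a standalone sketch (disk path from the diff, the policy-side cert assumed already written out to a file):

    disk=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint)
    policy=$(openssl x509 -in /tmp/policy-cert.pem -noout -sha256 -fingerprint)   # hypothetical extract
    [[ "$disk" == "$policy" ]] || echo "certificate drift - forcing policy update"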
+1 -1
@@ -4,7 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS, SO_MANAGED_INDICES %}
+7 -6
@@ -17,7 +17,7 @@ include:
- elasticsearch.ssl
- elasticsearch.config
- elasticsearch.sostatus
{%- if GLOBALS.role != "so-searchnode" %}
{%- if GLOBALS.role != 'so-searchode' %}
- elasticsearch.cluster
{%- endif%}
@@ -102,6 +102,11 @@ so-elasticsearch:
- cmd: auth_users_roles_inode
- cmd: auth_users_inode
delete_so-elasticsearch_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-elasticsearch$
wait_for_so-elasticsearch:
http.wait_for_successful_query:
- name: "https://localhost:9200/"
@@ -112,14 +117,10 @@ wait_for_so-elasticsearch:
- status: 200
- wait_for: 300
- request_interval: 15
- backend: requests
- require:
- docker_container: so-elasticsearch
delete_so-elasticsearch_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-elasticsearch$
{% else %}
{{sls}}_state_not_allowed:
+1 -2
@@ -63,8 +63,7 @@
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code instanceof String", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long}(?:\\s+%{GREEDYDATA})?"], "ignore_failure": true } },
{ "convert": { "if": "ctx.http?.response?.status_code != null && !(ctx.http.response.status_code instanceof Number)", "field": "http.response.status_code", "type": "long", "ignore_failure": true } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } },
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
@@ -103,13 +103,11 @@ load_component_templates() {
local pattern="${ELASTICSEARCH_TEMPLATES_DIR}/component/$2"
local append_mappings="${3:-"false"}"
# current state of nullglob shell option
shopt -q nullglob && nullglob_set=1 || nullglob_set=0
shopt -s nullglob
echo -e "\nLoading $printed_name component templates...\n"
if ! compgen -G "${pattern}/*.json" > /dev/null; then
echo "No $printed_name component templates found in ${pattern}, skipping."
return
fi
for component in "$pattern"/*.json; do
tmpl_name=$(basename "${component%.json}")
@@ -123,6 +121,11 @@ load_component_templates() {
SO_LOAD_FAILURES_NAMES+=("$component")
fi
done
# restore nullglob shell option if needed
if [[ $nullglob_set -eq 1 ]]; then
shopt -u nullglob
fi
}
check_elasticsearch_responsive() {
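The nullglob save/set/restore in this hunk exists because, without nullglob, an unmatched glob expands to the literal pattern and the loop body runs once on a file that does not exist:

    mkdir -p /tmp/empty && cd /tmp/empty
    for f in *.json; do echo "saw: $f"; done   # prints: saw: *.json
    shopt -s nullglob
    for f in *.json; do echo "saw: $f"; done   # prints nothing
    shopt -u nullglob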
@@ -133,32 +136,7 @@ check_elasticsearch_responsive() {
fail "Elasticsearch is not responding. Please review Elasticsearch logs /opt/so/log/elasticsearch/securityonion.log for more details. Additionally, consider running so-elasticsearch-troubleshoot."
}
index_templates_exist() {
local templates_dir="$1"
if [[ ! -d "$templates_dir" ]]; then
return 1
fi
compgen -G "${templates_dir}/*.json" > /dev/null
}
should_load_addon_templates() {
if [[ "$IS_HEAVYNODE" == "true" ]]; then
return 1
fi
# Skip statefile checks when forcing template load
if [[ "$FORCE" != "true" ]]; then
if [[ ! -f "$SO_STATEFILE_SUCCESS" || -f "$ADDON_STATEFILE_SUCCESS" ]]; then
return 1
fi
fi
index_templates_exist "$ADDON_TEMPLATES_DIR"
}
if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_exist "$SO_TEMPLATES_DIR"; then
if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]]; then
check_elasticsearch_responsive
if [[ "$IS_HEAVYNODE" == "false" ]]; then
@@ -223,14 +201,13 @@ if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_e
fail "Failed to load all Security Onion core templates successfully."
fi
fi
elif ! index_templates_exist "$SO_TEMPLATES_DIR"; then
echo "No Security Onion core index templates found in ${SO_TEMPLATES_DIR}, skipping."
elif [[ -f "$SO_STATEFILE_SUCCESS" ]]; then
else
echo "Security Onion core templates already loaded"
fi
# Start loading addon templates
if should_load_addon_templates; then
if [[ (-d "$ADDON_TEMPLATES_DIR" && -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && ! -f "$ADDON_STATEFILE_SUCCESS") || (-d "$ADDON_TEMPLATES_DIR" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "true") ]]; then
check_elasticsearch_responsive
-33
@@ -398,7 +398,6 @@ firewall:
- elasticsearch_rest
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -411,7 +410,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -429,7 +427,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- sensoroni
searchnode:
portgroups:
@@ -440,7 +437,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -454,7 +450,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -464,7 +459,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -498,7 +492,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- elastic_agent_control
@@ -509,7 +502,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -618,7 +610,6 @@ firewall:
- elasticsearch_rest
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -631,7 +622,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -649,7 +639,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- sensoroni
searchnode:
portgroups:
@@ -660,7 +649,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -674,7 +662,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -684,7 +671,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -716,7 +702,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- elastic_agent_control
@@ -727,7 +712,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -836,7 +820,6 @@ firewall:
- elasticsearch_rest
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -849,7 +832,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -867,7 +849,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- sensoroni
searchnode:
portgroups:
@@ -877,7 +858,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -890,7 +870,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -900,7 +879,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -934,7 +912,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- elastic_agent_control
@@ -945,7 +922,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -1064,7 +1040,6 @@ firewall:
- elasticsearch_rest
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -1077,7 +1052,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -1089,7 +1063,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- beats_5044
@@ -1101,7 +1074,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- redis
@@ -1111,7 +1083,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- redis
@@ -1122,7 +1093,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -1159,7 +1129,6 @@ firewall:
portgroups:
- docker_registry
- influxdb
- postgres
- sensoroni
- yum
- elastic_agent_control
@@ -1170,7 +1139,6 @@ firewall:
- yum
- docker_registry
- influxdb
- postgres
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -1514,7 +1482,6 @@ firewall:
- kibana
- redis
- influxdb
- postgres
- elasticsearch_rest
- elasticsearch_node
- elastic_agent_control
+13
@@ -1,5 +1,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}
{% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}
{# add our ip to self #}
@@ -55,4 +56,16 @@
{% endif %}
{# Open Postgres (5432) to minion hostgroups when Telegraf is configured to write to Postgres #}
{% set TG_OUT = TELEGRAFMERGED.output | upper %}
{% if TG_OUT in ['POSTGRES', 'BOTH'] %}
{% if role.startswith('manager') or role == 'standalone' or role == 'eval' %}
{% for r in ['sensor', 'searchnode', 'heavynode', 'receiver', 'fleet', 'idh', 'desktop', 'import'] %}
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('postgres') %}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
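Quick spot-check (illustrative) that the conditional above took effect on a manager once Telegraf's output includes postgres; chain name and port come straight from the template:

    iptables -nL DOCKER-USER | grep -w 5432   # expect ACCEPT rules for the minion hostgroups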
+1
@@ -59,4 +59,5 @@ global:
description: Allows use of Endgame with Security Onion. This feature requires a license from Endgame.
global: True
advanced: True
helpLink: influxdb
+1 -1
@@ -22,7 +22,7 @@ kibana:
- default
- file
migrations:
discardCorruptObjects: "9.3.3"
discardCorruptObjects: "8.18.8"
telemetry:
enabled: False
xpack:
+1 -1
@@ -3,8 +3,8 @@ kratos:
description: Enables or disables the Kratos authentication system. WARNING - Disabling this process will cause the grid to malfunction. Re-enabling this setting will require manual effort via SSH.
forcedType: bool
advanced: True
readonly: True
helpLink: kratos
oidc:
enabled:
description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.
+9 -142
@@ -24,14 +24,6 @@ BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false
SALT_CLOUD_CONFIGURED=false
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
@@ -485,130 +477,6 @@ elasticsearch_backup_index_templates() {
tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
}
elasticfleet_set_agent_logging_level_warn() {
. /usr/sbin/so-elastic-fleet-common
local current_agent_policies
if ! current_agent_policies=$(fleet_api "agent_policies?perPage=1000"); then
echo "Warning: unable to retrieve Fleet agent policies"
return 0
fi
# Only updating policies that are within Security Onion defaults and do not already have any user configured advanced_settings.
local policies_to_update
policies_to_update=$(jq -c '
.items[]
| select(has("advanced_settings") | not)
| select(
.id == "so-grid-nodes_general"
or .id == "so-grid-nodes_heavy"
or .id == "endpoints-initial"
or (.id | startswith("FleetServer_"))
)
' <<< "$current_agent_policies")
if [[ -z "$policies_to_update" ]]; then
return 0
fi
while IFS= read -r policy; do
[[ -z "$policy" ]] && continue
local policy_id policy_name policy_namespace
policy_id=$(jq -r '.id' <<< "$policy")
policy_name=$(jq -r '.name' <<< "$policy")
policy_namespace=$(jq -r '.namespace' <<< "$policy")
local update_logging
update_logging=$(jq -n \
--arg name "$policy_name" \
--arg namespace "$policy_namespace" \
'{name: $name, namespace: $namespace, advanced_settings: {agent_logging_level: "warning"}}'
)
echo "Setting elastic agent_logging_level to warning on policy '$policy_name' ($policy_id)."
if ! fleet_api "agent_policies/$policy_id" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$update_logging" >/dev/null; then
echo " warning: failed to update agent policy '$policy_name' ($policy_id)" >&2
fi
done <<< "$policies_to_update"
}
check_transform_health_and_reauthorize() {
. /usr/sbin/so-elastic-fleet-common
echo "Checking integration transform jobs for unhealthy / unauthorized status..."
local transforms_doc stats_doc installed_doc
if ! transforms_doc=$(so-elasticsearch-query "_transform/_all?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
echo "Unable to query for transform jobs, skipping reauthorization."
return 0
fi
if ! stats_doc=$(so-elasticsearch-query "_transform/_all/_stats?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
echo "Unable to query for transform job stats, skipping reauthorization."
return 0
fi
if ! installed_doc=$(fleet_api "epm/packages/installed?perPage=500"); then
echo "Unable to list installed Fleet packages, skipping reauthorization."
return 0
fi
# Get all transforms that meet the following
# - unhealthy (any non-green health status)
# - metadata has run_as_kibana_system: false (this fix is specific to transforms started prior to Kibana 9.3.3)
# - are not orphaned (integration is not somehow missing/corrupt/uninstalled)
local unhealthy_transforms
unhealthy_transforms=$(jq -c -n \
--argjson t "$transforms_doc" \
--argjson s "$stats_doc" \
--argjson i "$installed_doc" '
($i.items | map({key: .name, value: .version}) | from_entries) as $pkg_ver
| ($s.transforms | map({key: .id, value: .health.status}) | from_entries) as $health
| [ $t.transforms[]
| select(._meta.run_as_kibana_system == false)
| select(($health[.id] // "unknown") != "green")
| {id, pkg: ._meta.package.name, ver: ($pkg_ver[._meta.package.name])}
]
| if length == 0 then empty else . end
| (map(select(.ver == null)) | map({orphan: .id})[]),
(map(select(.ver != null))
| group_by(.pkg)
| map({pkg: .[0].pkg, ver: .[0].ver, transformIds: map(.id)})[])
')
if [[ -z "$unhealthy_transforms" ]]; then
return 0
fi
local unhealthy_count
unhealthy_count=$(jq -s '[.[].transformIds? // empty | .[]] | length' <<< "$unhealthy_transforms")
echo "Found $unhealthy_count transform(s) needing reauthorization."
local total_failures=0
while IFS= read -r transform; do
[[ -z "$transform" ]] && continue
if jq -e 'has("orphan")' <<< "$transform" >/dev/null 2>&1; then
echo "Skipping transform not owned by any installed Fleet package: $(jq -r '.orphan' <<< "$transform")"
continue
fi
local pkg ver body resp
pkg=$(jq -r '.pkg' <<< "$transform")
ver=$(jq -r '.ver' <<< "$transform")
body=$(jq -c '{transforms: (.transformIds | map({transformId: .}))}' <<< "$transform")
echo "Reauthorizing transform(s) for ${pkg}-${ver}..."
resp=$(fleet_api "epm/packages/${pkg}/${ver}/transforms/authorize" \
-XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
-d "$body") || { echo "Could not reauthorize transform(s) for ${pkg}-${ver}"; continue; }
(( total_failures += $(jq 'map(select(.success != true)) | length' <<< "$resp" 2>/dev/null) ))
done <<< "$unhealthy_transforms"
if [[ "$total_failures" -gt 0 ]]; then
echo "Some transform(s) failed to reauthorize."
fi
}
ensure_postgres_local_pillar() {
# Postgres was added as a service after 3.0.0, so the new pillar/top.sls
# references postgres.soc_postgres / postgres.adv_postgres unconditionally.
@@ -658,10 +526,6 @@ up_to_3.1.0() {
post_to_3.1.0() {
/usr/sbin/so-kibana-space-defaults
# ensure manager has new version of socloud.conf
if [[ $SALT_CLOUD_CONFIGURED == true ]]; then
salt-call state.apply salt.cloud.config concurrent=True
fi
# Backfill the Telegraf creds pillar for every accepted minion. so-telegraf-cred
# add is idempotent — it no-ops when an entry already exists — so this is safe
@@ -677,12 +541,6 @@ post_to_3.1.0() {
# file_roots of its own and --local would fail with "No matching sls found".
salt-call state.apply postgres.telegraf_users queue=True || true
# Update default agent policies to use logging level warn.
elasticfleet_set_agent_logging_level_warn || true
# Check for unhealthy / unauthorized integration transform jobs and attempt reauthorizations
check_transform_health_and_reauthorize || true
POSTVERSION=3.1.0
}
@@ -856,6 +714,15 @@ upgrade_check_salt() {
upgrade_salt() {
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt"
+1 -10
@@ -3,14 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set hypervisor = pillar.get('minion_id', '') %}
{% if not hypervisor|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
{% do salt.log.error('delete_hypervisor_orch: refusing unsafe minion_id=' ~ hypervisor) %}
delete_hypervisor_invalid_minion_id:
test.fail_without_changes:
- name: delete_hypervisor_invalid_minion_id
{% else %}
{% set hypervisor = pillar.minion_id %}
ensure_hypervisor_mine_deleted:
salt.function:
@@ -27,5 +20,3 @@ update_salt_cloud_profile:
- sls:
- salt.cloud.config
- concurrent: True
{% endif %}
+1 -10
@@ -12,14 +12,7 @@
{% if 'vrt' in salt['pillar.get']('features', []) %}
{% do salt.log.debug('vm_pillar_clean_orch: Running') %}
{% set vm_name = pillar.get('vm_name', '') %}
{% if not vm_name|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
{% do salt.log.error('vm_pillar_clean_orch: refusing unsafe vm_name=' ~ vm_name) %}
vm_pillar_clean_invalid_name:
test.fail_without_changes:
- name: vm_pillar_clean_invalid_name
{% else %}
{% set vm_name = pillar.get('vm_name') %}
delete_adv_{{ vm_name }}_pillar:
module.run:
@@ -31,8 +24,6 @@ delete_{{ vm_name }}_pillar:
- file.remove:
- path: /opt/so/saltstack/local/pillar/minions/{{ vm_name }}.sls
{% endif %}
{% else %}
{% do salt.log.error(
+4 -6
@@ -3,15 +3,12 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set hid = data['id'] %}
{% if hid|regex_match('^([A-Za-z0-9._-]{1,253})$')
and hid.endswith('_hypervisor')
and data['result'] == True %}
{% if data['id'].endswith('_hypervisor') and data['result'] == True %}
{% if data['act'] == 'accept' %}
check_and_trigger:
runner.setup_hypervisor.setup_environment:
- minion_id: {{ hid }}
- minion_id: {{ data['id'] }}
{% endif %}
{% if data['act'] == 'delete' %}
@@ -20,7 +17,8 @@ delete_hypervisor:
- args:
- mods: orch.delete_hypervisor
- pillar:
minion_id: {{ hid }}
minion_id: {{ data['id'] }}
{% endif %}
{% endif %}
+15 -27
@@ -1,7 +1,7 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
@@ -9,42 +9,30 @@ import logging
import os
import pwd
import grp
import re
log = logging.getLogger(__name__)
PILLAR_ROOT = '/opt/so/saltstack/local/pillar/minions/'
_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
def run():
vm_name = data.get('kwargs', {}).get('name', '')
if not _VMNAME_RE.match(str(vm_name)):
log.error("createEmptyPillar reactor: refusing unsafe vm_name=%r", vm_name)
return {}
log.info("createEmptyPillar reactor: vm_name: %s", vm_name)
vm_name = data['kwargs']['name']
logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
pillar_root = '/opt/so/saltstack/local/pillar/minions/'
pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']
try:
# Get socore user and group IDs
socore_uid = pwd.getpwnam('socore').pw_uid
socore_gid = grp.getgrnam('socore').gr_gid
pillar_root_real = os.path.realpath(PILLAR_ROOT)
for f in pillar_files:
full_path = os.path.join(PILLAR_ROOT, f)
resolved = os.path.realpath(full_path)
if os.path.dirname(resolved) != pillar_root_real:
log.error("createEmptyPillar reactor: refusing path outside pillar root: %s", resolved)
continue
if os.path.exists(resolved):
continue
os.mknod(resolved)
os.chown(resolved, socore_uid, socore_gid)
os.chmod(resolved, 0o640)
log.info("createEmptyPillar reactor: created %s with socore:socore ownership and mode 0640", f)
full_path = pillar_root + f
if not os.path.exists(full_path):
# Create empty file
os.mknod(full_path)
# Set ownership to socore:socore
os.chown(full_path, socore_uid, socore_gid)
# Set mode to 644 (rw-r--r--)
os.chmod(full_path, 0o640)
logging.error("createEmptyPillar reactor: created %s with socore:socore ownership and mode 644" % f)
except (KeyError, OSError) as e:
log.error("createEmptyPillar reactor: Error setting ownership/permissions: %s", e)
logging.error("createEmptyPillar reactor: Error setting ownership/permissions: %s" % str(e))
return {}
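The containment check above, restated as a shell sketch (the reactor does it in Python): resolve the candidate path, then insist its parent directory is exactly the pillar root, which defeats ../ traversal and symlinked names alike.

    root=$(realpath /opt/so/saltstack/local/pillar/minions)
    cand=$(realpath -m "$root/../../../etc/evil.sls")   # crafted vm_name, for illustration
    [[ $(dirname "$cand") == "$root" ]] || echo "refusing path outside pillar root: $cand"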
+11 -33
@@ -1,40 +1,18 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import re
remove_key:
wheel.key.delete:
- args:
- match: {{ data['name'] }}
log = logging.getLogger(__name__)
{{ data['name'] }}_pillar_clean:
runner.state.orchestrate:
- args:
- mods: orch.vm_pillar_clean
- pillar:
vm_name: {{ data['name'] }}
_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
def run():
name = data.get('name', '')
if not _VMNAME_RE.match(str(name)):
log.error("deleteKey reactor: refusing unsafe name=%r", name)
return {}
log.info("deleteKey reactor: deleted minion key: %s", name)
return {
'remove_key': {
'wheel.key.delete': [
{'args': [
{'match': name},
]},
],
},
'%s_pillar_clean' % name: {
'runner.state.orchestrate': [
{'args': [
{'mods': 'orch.vm_pillar_clean'},
{'pillar': {'vm_name': name}},
]},
],
},
}
{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}
+18 -53
@@ -6,74 +6,39 @@
# Elastic License 2.0.
import logging
import os
import re
import shlex
import subprocess
from subprocess import call
import yaml
log = logging.getLogger(__name__)
SO_MINION = '/usr/sbin/so-minion'
_NODETYPE_RE = re.compile(r'^[A-Z][A-Z0-9_]{0,31}$')
_MINIONID_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
_HOSTPART_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
_IPV4_RE = re.compile(
r'^(?:(?:25[0-5]|2[0-4]\d|[01]?\d?\d)\.){3}'
r'(?:25[0-5]|2[0-4]\d|[01]?\d?\d)$'
)
_HEAP_RE = re.compile(r'^\d{1,6}[kKmMgG]?$')
def _check(name, value, pattern):
s = str(value)
if not pattern.match(s):
raise ValueError("sominion_setup_reactor: refusing unsafe %s=%r" % (name, value))
return s
def run():
log.info('sominion_setup_reactor: Running')
minionid = data['id']
DATA = data['data']
hv_name = DATA['HYPERVISOR_HOST']
log.info('sominion_setup_reactor: DATA: %s' % DATA)
nodetype = _check('NODETYPE', DATA['NODETYPE'], _NODETYPE_RE)
argv = [
SO_MINION,
'-o=addVM',
'-m=' + _check('minionid', minionid, _MINIONID_RE),
'-n=' + _check('MNIC', DATA['MNIC'], _HOSTPART_RE),
'-i=' + _check('MAINIP', DATA['MAINIP'], _IPV4_RE),
'-c=' + str(int(DATA['CPUCORES'])),
'-d=' + str(DATA['NODE_DESCRIPTION']),
]
# Build the base command
cmd = "NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -c=" + str(DATA['CPUCORES']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'"
# Add optional arguments only if they exist in DATA
if 'CORECOUNT' in DATA:
argv.append('-C=' + str(int(DATA['CORECOUNT'])))
cmd += " -C=" + str(DATA['CORECOUNT'])
if 'INTERFACE' in DATA:
argv.append('-a=' + _check('INTERFACE', DATA['INTERFACE'], _HOSTPART_RE))
cmd += " -a=" + DATA['INTERFACE']
if 'ES_HEAP_SIZE' in DATA:
argv.append('-e=' + _check('ES_HEAP_SIZE', DATA['ES_HEAP_SIZE'], _HEAP_RE))
cmd += " -e=" + DATA['ES_HEAP_SIZE']
if 'LS_HEAP_SIZE' in DATA:
argv.append('-l=' + _check('LS_HEAP_SIZE', DATA['LS_HEAP_SIZE'], _HEAP_RE))
cmd += " -l=" + DATA['LS_HEAP_SIZE']
if 'LSHOSTNAME' in DATA:
argv.append('-L=' + _check('LSHOSTNAME', DATA['LSHOSTNAME'], _HOSTPART_RE))
env = os.environ.copy()
env['NODETYPE'] = nodetype
log.info(
'sominion_setup_reactor: argv: %s (NODETYPE=%s)',
' '.join(shlex.quote(a) for a in argv),
shlex.quote(nodetype),
)
rc = subprocess.call(argv, shell=False, env=env)
cmd += " -L=" + DATA['LSHOSTNAME']
log.info('sominion_setup_reactor: Command: %s' % cmd)
rc = call(cmd, shell=True)
log.info('sominion_setup_reactor: rc: %s' % rc)
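Why the argv form above matters: with the old string-built command, anything in NODE_DESCRIPTION escapes its single quotes and executes. Simulating shell=True with eval on a fabricated description:

    DESC="x'; echo INJECTED; echo 'y"
    eval "echo -d='$DESC'"   # prints -d=x, then INJECTED: the description ran as code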
@@ -27,7 +27,6 @@ sool9_{{host}}:
log_file: /opt/so/log/salt/minion
grains:
hypervisor_host: {{host ~ "_" ~ role}}
sosmodel: HVGUEST
preflight_cmds:
- |
{%- set hostnames = [MANAGERHOSTNAME] %}
+5
@@ -24,6 +24,11 @@
{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% if GLOBALS.postgres is defined and GLOBALS.postgres.auth is defined %}
{% set PG_ADMIN_PASS = salt['pillar.get']('secrets:postgres_pass', '') %}
{% do SOCDEFAULTS.soc.config.server.modules.update({'postgres': {'hostUrl': GLOBALS.manager_ip, 'port': 5432, 'username': GLOBALS.postgres.auth.users.so_postgres_user.user, 'password': GLOBALS.postgres.auth.users.so_postgres_user.pass, 'adminUser': 'postgres', 'adminPassword': PG_ADMIN_PASS, 'dbname': 'securityonion', 'sslMode': 'require', 'assistantEnabled': true, 'esHostUrl': 'https://' ~ GLOBALS.manager_ip ~ ':9200', 'esUsername': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'esPassword': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass, 'esVerifyCert': false}}) %}
{% endif %}
{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'token': INFLUXDB_TOKEN}) %}
{% for tool in SOCDEFAULTS.soc.config.server.client.tools %}
-5
@@ -3,7 +3,6 @@ soc:
description: Enables or disables SOC. WARNING - Disabling this setting is unsupported and will cause the grid to malfunction. Re-enabling this setting is a manual effort via SSH.
forcedType: bool
advanced: True
readonly: True
telemetryEnabled:
title: SOC Telemetry
description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting.
@@ -891,16 +890,12 @@ soc:
suricata:
description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id.
multiline: True
forcedType: string
strelka:
description: The template used when creating a new Strelka detection.
multiline: True
forcedType: string
elastalert:
description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id.
multiline: True
forcedType: string
grid:
maxUploadSize:
description: The maximum number of bytes for an uploaded PCAP import file.
+1 -1
@@ -15,7 +15,7 @@ from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
with open("/opt/so/conf/strelka/filecheck.yaml", "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)
cfg = yaml.load(ymlfile, Loader=yaml.Loader)
extract_path = cfg["filecheck"]["extract_path"]
historypath = cfg["filecheck"]["historypath"]
+32 -121
@@ -202,10 +202,10 @@ check_service_status() {
systemctl status $service_name > /dev/null 2>&1
local status=$?
if [ $status -gt 0 ]; then
info "$service_name is not running"
info " $service_name is not running"
return 1;
else
info "$service_name is running"
info " $service_name is running"
return 0;
fi
@@ -745,56 +745,6 @@ configure_network_sensor() {
return $err
}
configure_management_bond() {
local bond_name="bond1"
local bond_mode=${MBOND_MODE:-active-backup}
info "Setting up $bond_name management interface with mode $bond_mode"
if [[ ${#MBNICS[@]} -eq 0 ]]; then
error "[ERROR] No management bond NICs were selected."
fail_setup
fi
nmcli -t -f NAME con show | grep -Fxq "$bond_name"
local found_int=$?
if [[ $found_int != 0 ]]; then
nmcli con add type bond ifname "$bond_name" con-name "$bond_name" mode "$bond_mode" -- \
ipv6.method ignore \
connection.autoconnect yes >> "$setup_log" 2>&1
else
nmcli con mod "$bond_name" \
bond.options "mode=$bond_mode" \
ipv6.method ignore \
connection.autoconnect yes >> "$setup_log" 2>&1
fi
local err=0
for MBNIC in "${MBNICS[@]}"; do
local slave_name="$bond_name-slave-$MBNIC"
nmcli -t -f NAME con show | grep -Fxq "$slave_name"
found_int=$?
if [[ $found_int != 0 ]]; then
nmcli con add type ethernet ifname "$MBNIC" con-name "$slave_name" master "$bond_name" -- \
connection.autoconnect yes >> "$setup_log" 2>&1
else
nmcli con mod "$slave_name" \
connection.master "$bond_name" \
connection.slave-type bond \
connection.autoconnect yes >> "$setup_log" 2>&1
fi
nmcli con up "$slave_name" >> "$setup_log" 2>&1
local ret=$?
[[ $ret -eq 0 ]] || err=$ret
done
return $err
}
configure_hyper_bridge() {
info "Setting up hypervisor bridge"
info "Checking $MNIC ipv4.method is auto or manual"
@@ -1049,11 +999,6 @@ filter_unused_nics() {
grep_string="$grep_string\|$BONDNIC"
done
fi
if [[ $MBNICS ]]; then
for BONDNIC in "${MBNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')
@@ -1443,7 +1388,7 @@ network_init() {
title "Initializing Network"
disable_ipv6
set_hostname
if [[ $is_iso || $is_desktop_iso ]]; then
if [[ ( $is_iso || $is_desktop_iso ) ]]; then
set_management_interface
fi
}
@@ -1604,8 +1549,13 @@ clear_previous_setup_results() {
reinstall_init() {
info "Putting system in state to run setup again"
# Always include both services. check_service_status skips units that aren't present.
local salt_services=( "salt-master" "salt-minion" )
if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
local salt_services=( "salt-master" "salt-minion" )
else
local salt_services=( "salt-minion" )
fi
local service_retry_count=20
{
# remove all of root's cronjobs
@@ -1621,51 +1571,31 @@ reinstall_init() {
salt-call state.apply ca.remove -linfo --local --file-root=../salt
# Stop salt services and force-kill any lingering salt processes (including orphans
# from an earlier reinstall attempt where the unit file is gone but processes survive)
# so dnf remove salt can run cleanly
# Kill any salt processes (safely)
for service in "${salt_services[@]}"; do
# Stop the service in the background so we can exit after a certain amount of time
if check_service_status "$service"; then
info "Stopping $service via systemctl"
systemctl stop "$service"
systemctl stop "$service" &
fi
done
local pid=$!
# Unconditionally force-kill any remaining salt binaries — these may be orphaned
# from a prior aborted reinstall (no unit file, so systemctl can't see them).
for salt_bin in salt-master salt-minion salt-call salt-cloud; do
if pgrep -f "/usr/bin/${salt_bin}" > /dev/null 2>&1; then
info "Force-killing lingering $salt_bin processes"
pkill -9 -ef "/usr/bin/${salt_bin}" 2>/dev/null
fi
done
# Catch stray `salt` CLI children from saltutil.kill_all_jobs / state.apply invocations
pkill -9 -ef "/usr/bin/python3 /bin/salt" 2>/dev/null
local count=0
while check_service_status "$service"; do
if [[ $count -gt $service_retry_count ]]; then
echo "Could not stop $service after 1 minute, exiting setup."
# Give the kernel a moment to reap the killed processes before dnf removes the binaries
local kill_wait=0
while pgrep -f "/usr/bin/salt-" > /dev/null 2>&1; do
if [[ $kill_wait -gt 10 ]]; then
info "Salt processes still present after SIGKILL + 10s wait; proceeding anyway"
pgrep -af "/usr/bin/salt-" | while read -r line; do info " lingering: $line"; done
break
fi
sleep 1
((kill_wait++))
# Stop the systemctl process trying to kill the service, show user a message, then exit setup
kill -9 $pid
fail_setup
fi
sleep 5
((count++))
done
done
# Clear the 'failed' state SIGKILL left on the units before removing the package
systemctl reset-failed salt-master.service salt-minion.service 2>/dev/null || true
# Remove all salt configs
dnf -y remove salt
rm -rf /etc/salt/ /var/cache/salt/
# Drop systemd's in-memory references to the now-removed units
systemctl daemon-reload
# Uninstall local Elastic Agent, if installed
elastic-agent uninstall -f
rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/
if command -v docker &> /dev/null; then
# Stop and remove all so-* containers so files can be changed with more safety
@@ -1689,7 +1619,10 @@ reinstall_init() {
backup_dir /nsm/hydra "$date_string"
backup_dir /nsm/influxdb "$date_string"
} 2>&1 | tee -a "$setup_log"
# Uninstall local Elastic Agent, if installed
elastic-agent uninstall -f
} >> "$setup_log" 2>&1
info "System reinstall init has been completed."
}
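The force-kill sequence above follows a kill-then-bounded-wait pattern; in isolation:

    pkill -9 -f '/usr/bin/salt-' 2>/dev/null
    waited=0
    while pgrep -f '/usr/bin/salt-' >/dev/null 2>&1; do
      (( waited++ >= 10 )) && { echo "salt processes survived SIGKILL + wait"; break; }
      sleep 1
    done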
@@ -1756,24 +1689,6 @@ remove_package() {
fi
}
ensure_pyyaml() {
title "Ensuring python3-pyyaml is installed"
if rpm -q python3-pyyaml >/dev/null 2>&1; then
info "python3-pyyaml already installed"
return 0
fi
info "python3-pyyaml not found, attempting to install"
set -o pipefail
dnf -y install python3-pyyaml 2>&1 | tee -a "$setup_log"
local result=$?
set +o pipefail
if [[ $result -ne 0 ]] || ! rpm -q python3-pyyaml >/dev/null 2>&1; then
error "Failed to install python3-pyyaml (exit=$result)"
fail_setup
fi
info "python3-pyyaml installed successfully"
}
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml
# CAUTION! SALT VERSION UDDATES - READ BELOW
# When updating the salt version, also update the version in:
@@ -2157,12 +2072,8 @@ set_initial_firewall_access() {
# Set up the management interface on the ISO
set_management_interface() {
title "Setting up the main interface"
if [[ $MNIC == "bond1" ]]; then
configure_management_bond || fail_setup
fi
if [ "$address_type" = 'DHCP' ]; then
logCmd "nmcli con mod $MNIC connection.autoconnect yes ipv4.method auto"
logCmd "nmcli con mod $MNIC connection.autoconnect yes"
logCmd "nmcli con up $MNIC"
logCmd "nmcli -p connection show $MNIC"
else
+1 -4
@@ -66,9 +66,6 @@ set_timezone
# Let's see what OS we are dealing with here
detect_os
# Ensure python3-pyyaml is available before any code that may need so-yaml/PyYAML
ensure_pyyaml
# Check to see if this is the setup type of "desktop".
is_desktop=
@@ -222,7 +219,7 @@ if [ -n "$test_profile" ]; then
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r
NODE_DESCRIPTION="${HOSTNAME} - ${install_type} - ${MSRVIP_OFFSET}"
NODE_DESCRIPTION="${HOSTNAME} - ${install_type} - ${MAINIP}"
update_sudoers_for_testing
fi
+2 -83
@@ -845,99 +845,18 @@ whiptail_management_nic() {
[ -n "$TESTING" ] && return
filter_unused_nics
local management_nic_options=( "${nic_list_management[@]}" )
if [[ $is_iso || $is_desktop_iso ]]; then
management_nic_options+=( "BOND" "Configure a bonded management interface" )
fi
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 20 75 12 "${management_nic_options[@]}" 3>&1 1>&2 2>&3 )
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 20 75 12 "${nic_list_management[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$MNIC" ]
do
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 22 75 12 "${management_nic_options[@]}" 3>&1 1>&2 2>&3 )
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 22 75 12 "${nic_list_management[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
if [[ $MNIC == "BOND" ]]; then
whiptail_management_bond
fi
}
whiptail_management_bond() {
[ -n "$TESTING" ] && return
MBOND_MODE=$(whiptail --title "$whiptail_title" --menu \
"Choose the bond mode for the management interface.\n\nThe management bond will be created as bond1." 20 75 7 \
"active-backup" "One active NIC with failover (recommended)" \
"balance-rr" "Round-robin transmit policy" \
"balance-xor" "Transmit based on selected hash policy" \
"broadcast" "Transmit everything on all slave interfaces" \
"802.3ad" "Dynamic link aggregation (requires switch support)" \
"balance-tlb" "Adaptive transmit load balancing" \
"balance-alb" "Adaptive load balancing" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$MBOND_MODE" ]
do
MBOND_MODE=$(whiptail --title "$whiptail_title" --menu \
"Choose the bond mode for the management interface.\n\nThe management bond will be created as bond1." 20 75 7 \
"active-backup" "One active NIC with failover (recommended)" \
"balance-rr" "Round-robin transmit policy" \
"balance-xor" "Transmit based on selected hash policy" \
"broadcast" "Transmit everything on all slave interfaces" \
"802.3ad" "Dynamic link aggregation (requires switch support)" \
"balance-tlb" "Adaptive transmit load balancing" \
"balance-alb" "Adaptive load balancing" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
whiptail_management_bond_nics
MNIC="bond1"
export MBOND_MODE MNIC
}
whiptail_management_bond_nics() {
[ -n "$TESTING" ] && return
MBNICS=()
filter_unused_nics
MBNICS=$(whiptail --title "$whiptail_title" --checklist "Please add NICs to the Management Interface:" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$MBNICS" ]
do
MBNICS=$(whiptail --title "$whiptail_title" --checklist "Please add NICs to the Management Interface:" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
MBNICS=$(echo "$MBNICS" | tr -d '"')
IFS=' ' read -ra MBNICS <<< "$MBNICS"
for bond_nic in "${MBNICS[@]}"; do
for dev_status in "${nmcli_dev_status_list[@]}"; do
if [[ $dev_status == "${bond_nic}:unmanaged" ]]; then
whiptail \
--title "$whiptail_title" \
--msgbox "$bond_nic is unmanaged by Network Manager. Please remove it from other network management tools then re-run setup." \
8 75
exit
fi
done
done
export MBNICS
}
whiptail_net_method() {
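One subtlety in whiptail_management_bond_nics above: --checklist returns the selections as a single double-quoted string, hence the tr -d '"' plus read -ra to turn it into a proper array:

    raw='"eno1" "eno2"'                        # what whiptail --checklist emits
    read -ra MBNICS <<< "$(tr -d '"' <<< "$raw")"
    printf '%s\n' "${MBNICS[@]}"               # eno1 and eno2, one per line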