Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2026-05-09 21:02:36 +02:00
Compare commits
26 Commits
- ee36db4dd7
- b7faa0e437
- fad953b2b3
- 2c341e5160
- 8425ac4100
- 922fc60466
- da69f0f1a4
- 29b24fa263
- 981d8bb805
- 4e3dbd800c
- dc998191d9
- 9cce920d78
- a5e5f12889
- 999f3f5b15
- 6f9da893ac
- 0d3e2a0708
- e339aa41d5
- 01a24b3684
- f1cdd265f9
- 631f5bd754
- fb4615d5cd
- 6eaf22fc5a
- 592a6a4c21
- 409d4fb632
- 9d72149fcd
- e6afecbaa9
@@ -35,8 +35,6 @@
     'kratos',
     'hydra',
     'elasticfleet',
-    'elasticfleet.manager',
-    'elasticsearch.cluster',
     'elastic-fleet-package-registry',
     'utility'
   ] %}
@@ -81,7 +79,7 @@
     ),
     'so-heavynode': (
         sensor_states +
-        ['elasticagent', 'elasticsearch', 'elasticsearch.cluster', 'logstash', 'redis', 'nginx']
+        ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
     ),
     'so-idh': (
         ['idh']
@@ -188,27 +188,8 @@ update_docker_containers() {
       if [ -z "$HOSTNAME" ]; then
         HOSTNAME=$(hostname)
       fi
-      docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
-        echo "Unable to tag $image" >> "$LOG_FILE" 2>&1
-        exit 1
-      }
-      # Push to the embedded registry via a registry-to-registry copy. Avoids
-      # `docker push`, which on Docker 29.x with the containerd image store
-      # represents freshly-pulled images as an index whose layer content
-      # isn't reachable through the push path. The local `docker tag` above
-      # is preserved so so-image-pull's `:5000` existence check still works.
-      # Pin to the digest already gpg-verified above so we copy exactly the
-      # bytes we approved.
-      local VERIFIED_REF
-      VERIFIED_REF=$(echo "$DOCKERINSPECT" | jq -r ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" | head -n 1)
-      if [ -z "$VERIFIED_REF" ] || [ "$VERIFIED_REF" = "null" ]; then
-        echo "Unable to determine verified digest for $image" >> "$LOG_FILE" 2>&1
-        exit 1
-      fi
-      docker buildx imagetools create --tag $HOSTNAME:5000/$IMAGEREPO/$image "$VERIFIED_REF" >> "$LOG_FILE" 2>&1 || {
-        echo "Unable to copy $image to embedded registry" >> "$LOG_FILE" 2>&1
-        exit 1
-      }
+      docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
+      docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
     fi
   else
     echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1
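The comment block on the removed side describes copying an image into the embedded registry by its already-verified digest instead of pushing the locally tagged name. A minimal sketch of that pattern, assuming the image has already been pulled from the source registry; the image name, registry hostnames, and variable names here are placeholders, not the script's actual values:

```bash
#!/bin/bash
# Resolve the digest-pinned reference for an already-pulled image, then copy it
# registry-to-registry with buildx imagetools instead of `docker push`.
set -euo pipefail

image="so-nginx:2.4.160"                      # placeholder image name/tag
registry="ghcr.io/security-onion-solutions"   # placeholder source registry
local_registry="$(hostname):5000"             # embedded registry on :5000

# RepoDigests pins the exact bytes that were pulled; select the entry that
# points at the source registry so the copy matches what was verified.
verified_ref=$(docker inspect "$registry/$image" \
  | jq -r --arg reg "$registry" '.[0].RepoDigests[] | select(contains($reg))' \
  | head -n 1)

if [ -z "$verified_ref" ] || [ "$verified_ref" = "null" ]; then
  echo "no verified digest found for $image" >&2
  exit 1
fi

# imagetools copies the manifest and referenced layers between registries,
# bypassing the local image-store representation entirely.
docker buildx imagetools create --tag "$local_registry/$image" "$verified_ref"
```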
@@ -227,7 +227,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. (installed as so_elastic / so_kibana)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
 fi

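EXCLUDED_ERRORS is assembled as one long alternation, which suggests it is applied as an extended-regex exclusion filter over collected log lines. A reduced sketch of that idiom; the patterns and input below are sample data, and how the actual script applies the variable is an assumption:

```bash
#!/bin/bash
# Build an alternation of known-benign error patterns, then filter them out.
EXCLUDED_ERRORS="from NIC checksum offloading"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown"

# Sample log lines standing in for whatever the real script scans.
printf '%s\n' \
  "ERROR dropped packet from NIC checksum offloading" \
  "ERROR disk full on /nsm" \
  "ERROR manifest unknown for so-tcpreplay" \
  | grep -vE "$EXCLUDED_ERRORS"
# Only the "disk full" line survives the filter.
```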
@@ -9,7 +9,7 @@

 . /usr/sbin/so-common

-software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02" "HVGUEST")
+software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
 hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")

 {%- if salt['grains.get']('sosmodel', '') %}
@@ -87,11 +87,6 @@ check_boss_raid() {
 }

 check_software_raid() {
-  if [[ ! -f /proc/mdstat ]]; then
-    SWRAID=0
-    return
-  fi
-
   SWRC=$(grep "_" /proc/mdstat)
   if [[ -n $SWRC ]]; then
     # RAID is failed in some way
@@ -112,9 +107,7 @@ if [[ "$is_hwraid" == "true" ]]; then
 fi
 if [[ "$is_softwareraid" == "true" ]]; then
   check_software_raid
-  if [ "$model" != "HVGUEST" ]; then
     check_boss_raid
-  fi
 fi

 sum=$(($SWRAID + $BOSSRAID + $HWRAID))
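The check above leans on mdraid's status string: `/proc/mdstat` prints one `U` per healthy member and `_` for a missing or failed member, so a degraded two-disk mirror shows `[U_]` instead of `[UU]`. A small standalone sketch of the same test, assuming mdadm software RAID is in use:

```bash
#!/bin/bash
# Flag a degraded md array: a "_" inside the status brackets means a member
# is missing or failed, e.g. "[U_]" instead of "[UU]".
if [ ! -f /proc/mdstat ]; then
  echo "no software RAID present"
  exit 0
fi

if grep -q "_" /proc/mdstat; then
  echo "software RAID is degraded:"
  grep -A1 "^md" /proc/mdstat
else
  echo "software RAID is healthy"
fi
```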
@@ -17,17 +17,65 @@ include:
   - logstash.ssl
   - elasticfleet.config
   - elasticfleet.sostatus
-{%- if GLOBALS.role != "so-fleet" %}
-  - elasticfleet.manager
-{%- endif %}

-{% if GLOBALS.role != "so-fleet" %}
+{% if grains.role not in ['so-fleet'] %}
 # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
 wait_for_elasticsearch_elasticfleet:
   cmd.run:
     - name: so-elasticsearch-wait
+{% endif %}

+# If enabled, automatically update Fleet Logstash Outputs
+{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
+so-elastic-fleet-auto-configure-logstash-outputs:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-outputs-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
+so-elastic-fleet-auto-configure-logstash-outputs-force:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
+    - retry:
+        attempts: 4
+        interval: 30
+    - onchanges:
+      - x509: etc_elasticfleet_logstash_crt
+      - x509: elasticfleet_kafka_crt
+{% endif %}
+
+# If enabled, automatically update Fleet Server URLs & ES Connection
+{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-fleet'] %}
+so-elastic-fleet-auto-configure-server-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-urls-update
+    - retry:
+        attempts: 4
+        interval: 30
+{% endif %}
+
+# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
+{% if grains.role not in ['so-fleet'] %}
+so-elastic-fleet-auto-configure-elasticsearch-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-es-url-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+so-elastic-fleet-auto-configure-artifact-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+{% endif %}
+
 # Sync Elastic Agent artifacts to Fleet Node
+{% if grains.role in ['so-fleet'] %}
 elasticagent_syncartifacts:
   file.recurse:
     - name: /nsm/elastic-fleet/artifacts/beats
@@ -101,6 +149,57 @@ so-elastic-fleet:
       - x509: etc_elasticfleet_crt
 {% endif %}

+{% if GLOBALS.role != "so-fleet" %}
+so-elastic-fleet-package-statefile:
+  file.managed:
+    - name: /opt/so/state/elastic_fleet_packages.txt
+    - contents: {{ELASTICFLEETMERGED.packages}}
+
+so-elastic-fleet-package-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-package-upgrade
+    - retry:
+        attempts: 3
+        interval: 10
+    - onchanges:
+      - file: /opt/so/state/elastic_fleet_packages.txt
+
+so-elastic-fleet-integrations:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
+    - retry:
+        attempts: 3
+        interval: 10
+
+so-elastic-agent-grid-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-agent-grid-upgrade
+    - retry:
+        attempts: 12
+        interval: 5
+
+so-elastic-fleet-integration-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
+    - retry:
+        attempts: 3
+        interval: 10
+
+{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
+so-elastic-fleet-addon-integrations:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
+
+{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
+so-elastic-defend-manage-filters-file-watch:
+  cmd.run:
+    - name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
+    - onchanges:
+      - file: elasticdefendcustom
+      - file: elasticdefenddisabled
+{% endif %}
+{% endif %}
+
 delete_so-elastic-fleet_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
@@ -1,112 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
-{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
-
-include:
-  - elasticfleet.config
-
-# If enabled, automatically update Fleet Logstash Outputs
-{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval'] %}
-so-elastic-fleet-auto-configure-logstash-outputs:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
-so-elastic-fleet-auto-configure-logstash-outputs-force:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
-    - retry:
-        attempts: 4
-        interval: 30
-    - onchanges:
-      - x509: etc_elasticfleet_logstash_crt
-      - x509: elasticfleet_kafka_crt
-{% endif %}
-
-# If enabled, automatically update Fleet Server URLs & ES Connection
-so-elastic-fleet-auto-configure-server-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-urls-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
-so-elastic-fleet-auto-configure-elasticsearch-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-es-url-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-so-elastic-fleet-auto-configure-artifact-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-so-elastic-fleet-package-statefile:
-  file.managed:
-    - name: /opt/so/state/elastic_fleet_packages.txt
-    - contents: {{ELASTICFLEETMERGED.packages}}
-
-so-elastic-fleet-package-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-package-upgrade
-    - retry:
-        attempts: 3
-        interval: 10
-    - onchanges:
-      - file: /opt/so/state/elastic_fleet_packages.txt
-
-so-elastic-fleet-integrations:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
-    - retry:
-        attempts: 3
-        interval: 10
-
-so-elastic-agent-grid-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-agent-grid-upgrade
-    - retry:
-        attempts: 12
-        interval: 5
-
-so-elastic-fleet-integration-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
-    - retry:
-        attempts: 3
-        interval: 10
-
-{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
-so-elastic-fleet-addon-integrations:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
-
-{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
-so-elastic-defend-manage-filters-file-watch:
-  cmd.run:
-    - name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
-    - onchanges:
-      - file: elasticdefendcustom
-      - file: elasticdefenddisabled
-{% endif %}
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
-{% endif %}
@@ -5,12 +5,11 @@
 # this file except in compliance with the Elastic License 2.0.

 . /usr/sbin/so-common
-. /usr/sbin/so-elastic-fleet-common
 {%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
 {%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
 {# Optionally override Elasticsearch version for Elastic Agent patch releases #}
 {%- if ELASTICFLEETDEFAULTS.elasticfleet.patch_version is defined %}
-{%- do ELASTICSEARCHDEFAULTS.elasticsearch.update({'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}) %}
+{%- do ELASTICSEARCHDEFAULTS.update({'elasticsearch': {'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}}) %}
 {%- endif %}

 # Only run on Managers
@@ -20,8 +19,11 @@ if ! is_manager_node; then
 fi

 # Get current list of Grid Node Agents that need to be upgraded
-if ! RAW_JSON=$(fleet_api "agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version | urlencode }}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)

+# Check to make sure that the server responded with good data - else, bail from script
+CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
+if [ "$CHECKSUM" -ne 1 ]; then
   printf "Failed to query for current Grid Agents...\n"
   exit 1
 fi
@@ -34,12 +36,10 @@ if [ "$OUTDATED_LIST" != '[]' ]; then
   printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"

   # Generate updated JSON payload
-  JSON_STRING=$(jq -n --arg ELASTICVERSION "{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}" --argjson UPDATELIST "$OUTDATED_LIST" '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
+  JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')

   # Update Node Agents
-  if ! fleet_api "agents/bulk_upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
-    printf "Failed to initiate Agent upgrades...\n"
-  fi
+  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 else
   printf "No Agents need updates... Exiting\n\n"
   exit 0
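One side of this hunk passes the agent list with `--argjson` and the other with `--arg`; the distinction matters because `--arg` always binds a string, while `--argjson` parses the value as JSON so the list is embedded as a real array. A quick illustration with sample values only:

```bash
#!/bin/bash
# Sample list of agent IDs as a JSON array.
OUTDATED_LIST='["agent-1","agent-2"]'

# --argjson parses the value: "agents" becomes a JSON array.
jq -n --arg version "8.18.8" --argjson agents "$OUTDATED_LIST" \
  '{version: $version, agents: $agents}'
# => {"version":"8.18.8","agents":["agent-1","agent-2"]}

# --arg binds the raw text: "agents" becomes one string containing brackets.
jq -n --arg version "8.18.8" --arg agents "$OUTDATED_LIST" \
  '{version: $version, agents: $agents}'
# => {"version":"8.18.8","agents":"[\"agent-1\",\"agent-2\"]"}
```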
@@ -235,16 +235,6 @@ function update_kafka_outputs() {
 {% endif %}

-# Compare the current Elastic Fleet certificate against what is on disk
-POLICY_CERT_SHA=$(jq -r '.item.ssl.certificate' <<< $RAW_JSON | openssl x509 -noout -sha256 -fingerprint)
-DISK_CERT_SHA=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint)
-
-if [[ "$POLICY_CERT_SHA" != "$DISK_CERT_SHA" ]]; then
-  printf "Certificate on disk doesn't match certificate in policy - forcing update\n"
-  UPDATE_CERTS=true
-  FORCE_UPDATE=true
-fi
-
 # Sort & hash the new list of Logstash Outputs
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
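The removed block compares the certificate embedded in the Fleet output policy against the one on disk by SHA-256 fingerprint. A self-contained sketch of that comparison, using a throwaway self-signed certificate so it can be run anywhere; the JSON field name is taken from the hunk above, everything else is demo scaffolding:

```bash
#!/bin/bash
# Demonstrate the fingerprint comparison with a throwaway self-signed cert.
set -euo pipefail
tmp=$(mktemp -d)

openssl req -x509 -newkey rsa:2048 -nodes -subj "/CN=demo" \
  -keyout "$tmp/key.pem" -out "$tmp/cert.pem" -days 1 2>/dev/null

# Pretend this JSON came back from the outputs API (field path from the hunk).
policy_json=$(jq -n --rawfile cert "$tmp/cert.pem" '{item:{ssl:{certificate:$cert}}}')

policy_fp=$(jq -r '.item.ssl.certificate' <<< "$policy_json" \
  | openssl x509 -noout -sha256 -fingerprint)
disk_fp=$(openssl x509 -in "$tmp/cert.pem" -noout -sha256 -fingerprint)

if [ "$policy_fp" != "$disk_fp" ]; then
  echo "mismatch - would force an update of the fleet output policy"
else
  echo "fingerprints match"
fi
```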
@@ -4,7 +4,7 @@
 # Elastic License 2.0.

 {% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
 {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS, SO_MANAGED_INDICES %}
@@ -17,7 +17,7 @@ include:
   - elasticsearch.ssl
   - elasticsearch.config
   - elasticsearch.sostatus
-{%- if GLOBALS.role != "so-searchnode" %}
+{%- if GLOBALS.role != 'so-searchode' %}
   - elasticsearch.cluster
 {%- endif%}

@@ -102,6 +102,11 @@ so-elasticsearch:
       - cmd: auth_users_roles_inode
       - cmd: auth_users_inode

+delete_so-elasticsearch_so-status.disabled:
+  file.uncomment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-elasticsearch$
+
 wait_for_so-elasticsearch:
   http.wait_for_successful_query:
     - name: "https://localhost:9200/"
@@ -112,14 +117,10 @@ wait_for_so-elasticsearch:
     - status: 200
     - wait_for: 300
     - request_interval: 15
+    - backend: requests
     - require:
       - docker_container: so-elasticsearch

-delete_so-elasticsearch_so-status.disabled:
-  file.uncomment:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - regex: ^so-elasticsearch$
-
 {% else %}

 {{sls}}_state_not_allowed:
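The delete_so-elasticsearch_so-status.disabled state uses file.uncomment against /opt/so/conf/so-status/so-status.conf, which re-enables a service entry that was previously commented out. Outside of Salt, the effect is roughly a one-line sed; a sketch against a throwaway copy of such a file, with sample content only:

```bash
#!/bin/bash
# Recreate the effect of file.uncomment on a sample so-status style config.
conf=$(mktemp)
cat > "$conf" <<'EOF'
so-nginx
#so-elasticsearch
so-kibana
EOF

# Uncomment the line matching the regex, as the Salt state does for ^so-elasticsearch$.
sed -i 's/^#\(so-elasticsearch\)$/\1/' "$conf"
cat "$conf"
```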
@@ -103,13 +103,11 @@ load_component_templates() {
   local pattern="${ELASTICSEARCH_TEMPLATES_DIR}/component/$2"
   local append_mappings="${3:-"false"}"

+  # current state of nullglob shell option
+  shopt -q nullglob && nullglob_set=1 || nullglob_set=0
+
+  shopt -s nullglob
   echo -e "\nLoading $printed_name component templates...\n"

-  if ! compgen -G "${pattern}/*.json" > /dev/null; then
-    echo "No $printed_name component templates found in ${pattern}, skipping."
-    return
-  fi
-
   for component in "$pattern"/*.json; do
     tmpl_name=$(basename "${component%.json}")
@@ -123,6 +121,11 @@ load_component_templates() {
       SO_LOAD_FAILURES_NAMES+=("$component")
     fi
   done
+
+  # restore nullglob shell option if needed
+  if [[ $nullglob_set -eq 1 ]]; then
+    shopt -u nullglob
+  fi
 }

 check_elasticsearch_responsive() {
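Both sides of these two hunks solve the same problem: an unmatched glob such as `"$pattern"/*.json` is passed through literally unless `nullglob` is set, so the loop would iterate once over a non-existent filename. One side toggles the shell option and restores it, the other tests with `compgen -G` before looping. A small sketch of each approach, using a placeholder directory:

```bash
#!/bin/bash
dir=${1:-/tmp/empty-demo}   # placeholder; try it against an empty directory
mkdir -p "$dir"

# Approach 1: enable nullglob so an empty match expands to nothing, then restore it.
shopt -q nullglob && was_set=1 || was_set=0
shopt -s nullglob
for f in "$dir"/*.json; do
  echo "would load: $f"
done
[ "$was_set" -eq 0 ] && shopt -u nullglob

# Approach 2: test for matches first and skip the loop entirely when there are none.
if compgen -G "$dir/*.json" > /dev/null; then
  for f in "$dir"/*.json; do
    echo "would load: $f"
  done
else
  echo "no templates in $dir, skipping"
fi
```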
@@ -133,32 +136,7 @@ check_elasticsearch_responsive() {
     fail "Elasticsearch is not responding. Please review Elasticsearch logs /opt/so/log/elasticsearch/securityonion.log for more details. Additionally, consider running so-elasticsearch-troubleshoot."
 }

-index_templates_exist() {
-  local templates_dir="$1"
-
-  if [[ ! -d "$templates_dir" ]]; then
-    return 1
-  fi
-
-  compgen -G "${templates_dir}/*.json" > /dev/null
-}
-
-should_load_addon_templates() {
-  if [[ "$IS_HEAVYNODE" == "true" ]]; then
-    return 1
-  fi
-
-  # Skip statefile checks when forcing template load
-  if [[ "$FORCE" != "true" ]]; then
-    if [[ ! -f "$SO_STATEFILE_SUCCESS" || -f "$ADDON_STATEFILE_SUCCESS" ]]; then
-      return 1
-    fi
-  fi
-
-  index_templates_exist "$ADDON_TEMPLATES_DIR"
-}
-
-if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_exist "$SO_TEMPLATES_DIR"; then
+if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]]; then
   check_elasticsearch_responsive

   if [[ "$IS_HEAVYNODE" == "false" ]]; then
@@ -223,14 +201,13 @@ if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_e
       fail "Failed to load all Security Onion core templates successfully."
     fi
   fi
-elif ! index_templates_exist "$SO_TEMPLATES_DIR"; then
-  echo "No Security Onion core index templates found in ${SO_TEMPLATES_DIR}, skipping."
-elif [[ -f "$SO_STATEFILE_SUCCESS" ]]; then
+else
   echo "Security Onion core templates already loaded"
 fi

 # Start loading addon templates
-if should_load_addon_templates; then
+if [[ (-d "$ADDON_TEMPLATES_DIR" && -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && ! -f "$ADDON_STATEFILE_SUCCESS") || (-d "$ADDON_TEMPLATES_DIR" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "true") ]]; then

   check_elasticsearch_responsive

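The conditions in these hunks boil down to a statefile gate: load templates when forced or when the success marker is missing, and record success afterwards so later runs become a no-op. A reduced sketch of that idiom; the file paths here are placeholders, and the real script's flags and markers differ:

```bash
#!/bin/bash
# Idempotent loader gated on a success statefile, with a force escape hatch.
FORCE=${1:-false}
statefile=/tmp/demo_templates_loaded          # placeholder statefile
templates_dir=${TEMPLATES_DIR:-/tmp/demo-templates}

if [[ "$FORCE" == "true" || ! -f "$statefile" ]]; then
  if compgen -G "$templates_dir/*.json" > /dev/null; then
    echo "loading templates from $templates_dir"
    # ... load each template here ...
    touch "$statefile"
  else
    echo "no templates found in $templates_dir, skipping"
  fi
else
  echo "templates already loaded"
fi
```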
Each of the following hunks drops the `- postgres` entry from a portgroups list; hunks that share the same surrounding lines are shown once under their grouped headers.

@@ -398,7 +398,6 @@ firewall:
@@ -618,7 +610,6 @@ firewall:
@@ -836,7 +820,6 @@ firewall:
@@ -1064,7 +1040,6 @@ firewall:
           - elasticsearch_rest
           - docker_registry
           - influxdb
-          - postgres
           - sensoroni
           - yum
           - beats_5044

@@ -411,7 +410,6 @@ firewall:
@@ -631,7 +622,6 @@ firewall:
@@ -849,7 +832,6 @@ firewall:
@@ -1077,7 +1052,6 @@ firewall:
@@ -1089,7 +1063,6 @@ firewall:
        portgroups:
          - docker_registry
          - influxdb
-         - postgres
          - sensoroni
          - yum
          - beats_5044

@@ -429,7 +427,6 @@ firewall:
@@ -649,7 +639,6 @@ firewall:
@@ -867,7 +849,6 @@ firewall:
          - yum
          - docker_registry
          - influxdb
-         - postgres
          - sensoroni
      searchnode:
        portgroups:

@@ -440,7 +437,6 @@ firewall:
@@ -454,7 +450,6 @@ firewall:
@@ -464,7 +459,6 @@ firewall:
@@ -509,7 +502,6 @@ firewall:
@@ -660,7 +649,6 @@ firewall:
@@ -674,7 +662,6 @@ firewall:
@@ -684,7 +671,6 @@ firewall:
@@ -727,7 +712,6 @@ firewall:
@@ -877,7 +858,6 @@ firewall:
@@ -890,7 +870,6 @@ firewall:
@@ -900,7 +879,6 @@ firewall:
@@ -945,7 +922,6 @@ firewall:
@@ -1122,7 +1093,6 @@ firewall:
@@ -1170,7 +1139,6 @@ firewall:
          - yum
          - docker_registry
          - influxdb
-         - postgres
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update

@@ -498,7 +492,6 @@ firewall:
@@ -716,7 +702,6 @@ firewall:
@@ -934,7 +912,6 @@ firewall:
@@ -1159,7 +1129,6 @@ firewall:
        portgroups:
          - docker_registry
          - influxdb
-         - postgres
          - sensoroni
          - yum
          - elastic_agent_control

@@ -1101,7 +1074,6 @@ firewall:
@@ -1111,7 +1083,6 @@ firewall:
        portgroups:
          - docker_registry
          - influxdb
-         - postgres
          - sensoroni
          - yum
          - redis

@@ -1514,7 +1482,6 @@ firewall:
          - kibana
          - redis
          - influxdb
-         - postgres
          - elasticsearch_rest
          - elasticsearch_node
          - elastic_agent_control
@@ -1,5 +1,6 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKERMERGED %}
+{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}
 {% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}

 {# add our ip to self #}

@@ -55,4 +56,16 @@

 {% endif %}

+{# Open Postgres (5432) to minion hostgroups when Telegraf is configured to write to Postgres #}
+{% set TG_OUT = TELEGRAFMERGED.output | upper %}
+{% if TG_OUT in ['POSTGRES', 'BOTH'] %}
+{% if role.startswith('manager') or role == 'standalone' or role == 'eval' %}
+{% for r in ['sensor', 'searchnode', 'heavynode', 'receiver', 'fleet', 'idh', 'desktop', 'import'] %}
+{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('postgres') %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+
 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
@@ -59,4 +59,5 @@ global:
       description: Allows use of Endgame with Security Onion. This feature requires a license from Endgame.
       global: True
       advanced: True
+      helpLink: influxdb

@@ -22,7 +22,7 @@ kibana:
         - default
         - file
     migrations:
-      discardCorruptObjects: "9.3.3"
+      discardCorruptObjects: "8.18.8"
     telemetry:
       enabled: False
     xpack:
@@ -3,8 +3,8 @@ kratos:
       description: Enables or disables the Kratos authentication system. WARNING - Disabling this process will cause the grid to malfunction. Re-enabling this setting will require manual effort via SSH.
       forcedType: bool
       advanced: True
-      readonly: True
       helpLink: kratos

   oidc:
     enabled:
       description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.
@@ -24,14 +24,6 @@ BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
 SALTUPGRADED=false
 SALT_CLOUD_INSTALLED=false
 SALT_CLOUD_CONFIGURED=false
-# Check if salt-cloud is installed
-if rpm -q salt-cloud &>/dev/null; then
-  SALT_CLOUD_INSTALLED=true
-fi
-# Check if salt-cloud is configured
-if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
-  SALT_CLOUD_CONFIGURED=true
-fi
 # used to display messages to the user at the end of soup
 declare -a FINAL_MESSAGE_QUEUE=()

@@ -534,10 +526,6 @@ up_to_3.1.0() {

 post_to_3.1.0() {
   /usr/sbin/so-kibana-space-defaults
-  # ensure manager has new version of socloud.conf
-  if [[ $SALT_CLOUD_CONFIGURED == true ]]; then
-    salt-call state.apply salt.cloud.config concurrent=True
-  fi

   # Backfill the Telegraf creds pillar for every accepted minion. so-telegraf-cred
   # add is idempotent — it no-ops when an entry already exists — so this is safe
@@ -726,6 +714,15 @@ upgrade_check_salt() {
 upgrade_salt() {
   echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
   echo ""
+  # Check if salt-cloud is installed
+  if rpm -q salt-cloud &>/dev/null; then
+    SALT_CLOUD_INSTALLED=true
+  fi
+  # Check if salt-cloud is configured
+  if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
+    SALT_CLOUD_CONFIGURED=true
+  fi
+
   echo "Removing yum versionlock for Salt."
   echo ""
   yum versionlock delete "salt"
@@ -6,74 +6,39 @@
 # Elastic License 2.0.

 import logging
-import os
-import re
-import shlex
-import subprocess
+from subprocess import call
+import yaml

 log = logging.getLogger(__name__)

-SO_MINION = '/usr/sbin/so-minion'
-
-_NODETYPE_RE = re.compile(r'^[A-Z][A-Z0-9_]{0,31}$')
-_MINIONID_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
-_HOSTPART_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
-_IPV4_RE = re.compile(
-    r'^(?:(?:25[0-5]|2[0-4]\d|[01]?\d?\d)\.){3}'
-    r'(?:25[0-5]|2[0-4]\d|[01]?\d?\d)$'
-)
-_HEAP_RE = re.compile(r'^\d{1,6}[kKmMgG]?$')
-
-
-def _check(name, value, pattern):
-    s = str(value)
-    if not pattern.match(s):
-        raise ValueError("sominion_setup_reactor: refusing unsafe %s=%r" % (name, value))
-    return s
-
-
 def run():
     log.info('sominion_setup_reactor: Running')
     minionid = data['id']
     DATA = data['data']
+    hv_name = DATA['HYPERVISOR_HOST']
     log.info('sominion_setup_reactor: DATA: %s' % DATA)

-    nodetype = _check('NODETYPE', DATA['NODETYPE'], _NODETYPE_RE)
-    argv = [
-        SO_MINION,
-        '-o=addVM',
-        '-m=' + _check('minionid', minionid, _MINIONID_RE),
-        '-n=' + _check('MNIC', DATA['MNIC'], _HOSTPART_RE),
-        '-i=' + _check('MAINIP', DATA['MAINIP'], _IPV4_RE),
-        '-c=' + str(int(DATA['CPUCORES'])),
-        '-d=' + str(DATA['NODE_DESCRIPTION']),
-    ]
+    # Build the base command
+    cmd = "NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -c=" + str(DATA['CPUCORES']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'"

+    # Add optional arguments only if they exist in DATA
     if 'CORECOUNT' in DATA:
-        argv.append('-C=' + str(int(DATA['CORECOUNT'])))
+        cmd += " -C=" + str(DATA['CORECOUNT'])

     if 'INTERFACE' in DATA:
-        argv.append('-a=' + _check('INTERFACE', DATA['INTERFACE'], _HOSTPART_RE))
+        cmd += " -a=" + DATA['INTERFACE']

     if 'ES_HEAP_SIZE' in DATA:
-        argv.append('-e=' + _check('ES_HEAP_SIZE', DATA['ES_HEAP_SIZE'], _HEAP_RE))
+        cmd += " -e=" + DATA['ES_HEAP_SIZE']

     if 'LS_HEAP_SIZE' in DATA:
-        argv.append('-l=' + _check('LS_HEAP_SIZE', DATA['LS_HEAP_SIZE'], _HEAP_RE))
+        cmd += " -l=" + DATA['LS_HEAP_SIZE']

     if 'LSHOSTNAME' in DATA:
-        argv.append('-L=' + _check('LSHOSTNAME', DATA['LSHOSTNAME'], _HOSTPART_RE))
+        cmd += " -L=" + DATA['LSHOSTNAME']

-    env = os.environ.copy()
-    env['NODETYPE'] = nodetype
-
-    log.info(
-        'sominion_setup_reactor: argv: %s (NODETYPE=%s)',
-        ' '.join(shlex.quote(a) for a in argv),
-        shlex.quote(nodetype),
-    )
-    rc = subprocess.call(argv, shell=False, env=env)
-
+    log.info('sominion_setup_reactor: Command: %s' % cmd)
+    rc = call(cmd, shell=True)
     log.info('sominion_setup_reactor: rc: %s' % rc)
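The two sides of this hunk differ in how the so-minion command is assembled: one builds a single string and hands it to a shell, the other builds an argument vector and executes it directly, so the values are never re-parsed by the shell. The same trade-off exists in bash itself; a hedged sketch using a stand-in command (echo) rather than so-minion, with made-up sample values:

```bash
#!/bin/bash
# Stand-in values; a hostile description shows why shell re-parsing matters.
minion_id="sensor01_sensor"
description='quoted; $(touch /tmp/pwned) description'

# String + eval: the description is re-parsed by the shell, so metacharacters
# inside it would become live syntax.
cmd_string="echo -m=$minion_id -d='$description'"
# eval "$cmd_string"        # unsafe - deliberately left commented out

# Argument vector: each element is passed to the command exactly as stored,
# with no further shell parsing, regardless of its contents.
argv=(echo "-m=$minion_id" "-d=$description")
"${argv[@]}"
```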
@@ -27,7 +27,6 @@ sool9_{{host}}:
     log_file: /opt/so/log/salt/minion
     grains:
       hypervisor_host: {{host ~ "_" ~ role}}
-      sosmodel: HVGUEST
     preflight_cmds:
       - |
         {%- set hostnames = [MANAGERHOSTNAME] %}
@@ -3,7 +3,6 @@ soc:
       description: Enables or disables SOC. WARNING - Disabling this setting is unsupported and will cause the grid to malfunction. Re-enabling this setting is a manual effort via SSH.
       forcedType: bool
       advanced: True
-      readonly: True
     telemetryEnabled:
       title: SOC Telemetry
       description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting.
@@ -891,16 +890,12 @@ soc:
       suricata:
         description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id.
         multiline: True
-        forcedType: string
       strelka:
         description: The template used when creating a new Strelka detection.
         multiline: True
-        forcedType: string
       elastalert:
         description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id.
         multiline: True
-        forcedType: string

   grid:
     maxUploadSize:
       description: The maximum number of bytes for an uploaded PCAP import file.
+28 -40

@@ -202,10 +202,10 @@ check_service_status() {
   systemctl status $service_name > /dev/null 2>&1
   local status=$?
   if [ $status -gt 0 ]; then
-    info "$service_name is not running"
+    info " $service_name is not running"
     return 1;
   else
-    info "$service_name is running"
+    info " $service_name is running"
     return 0;
   fi

@@ -1549,8 +1549,13 @@ clear_previous_setup_results() {
 reinstall_init() {
   info "Putting system in state to run setup again"

-  # Always include both services. check_service_status skips units that aren't present.
+  if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
     local salt_services=( "salt-master" "salt-minion" )
+  else
+    local salt_services=( "salt-minion" )
+  fi
+
+  local service_retry_count=20

   {
     # remove all of root's cronjobs
@@ -1566,51 +1571,31 @@ reinstall_init() {

     salt-call state.apply ca.remove -linfo --local --file-root=../salt

-    # Stop salt services and force-kill any lingering salt processes (including orphans
-    # from an earlier reinstall attempt where the unit file is gone but processes survive)
-    # so dnf remove salt can run cleanly
+    # Kill any salt processes (safely)
     for service in "${salt_services[@]}"; do
+      # Stop the service in the background so we can exit after a certain amount of time
       if check_service_status "$service"; then
-        info "Stopping $service via systemctl"
-        systemctl stop "$service"
+        systemctl stop "$service" &
       fi
-    done
+      local pid=$!

-    # Unconditionally force-kill any remaining salt binaries — these may be orphaned
-    # from a prior aborted reinstall (no unit file, so systemctl can't see them).
-    for salt_bin in salt-master salt-minion salt-call salt-cloud; do
-      if pgrep -f "/usr/bin/${salt_bin}" > /dev/null 2>&1; then
-        info "Force-killing lingering $salt_bin processes"
-        pkill -9 -ef "/usr/bin/${salt_bin}" 2>/dev/null
-      fi
-    done
-    # Catch stray `salt` CLI children from saltutil.kill_all_jobs / state.apply invocations
-    pkill -9 -ef "/usr/bin/python3 /bin/salt" 2>/dev/null
-
-    # Give the kernel a moment to reap the killed processes before dnf removes the binaries
-    local kill_wait=0
-    while pgrep -f "/usr/bin/salt-" > /dev/null 2>&1; do
-      if [[ $kill_wait -gt 10 ]]; then
-        info "Salt processes still present after SIGKILL + 10s wait; proceeding anyway"
-        pgrep -af "/usr/bin/salt-" | while read -r line; do info "  lingering: $line"; done
-        break
-      fi
-      sleep 1
-      ((kill_wait++))
-    done
+      local count=0
+      while check_service_status "$service"; do
+        if [[ $count -gt $service_retry_count ]]; then
+          echo "Could not stop $service after 1 minute, exiting setup."
+          # Stop the systemctl process trying to kill the service, show user a message, then exit setup
+          kill -9 $pid
+          fail_setup
+        fi
+        sleep 5
+        ((count++))
+      done
+    done

-    # Clear the 'failed' state SIGKILL left on the units before removing the package
-    systemctl reset-failed salt-master.service salt-minion.service 2>/dev/null || true
-
     # Remove all salt configs
-    dnf -y remove salt
-    rm -rf /etc/salt/ /var/cache/salt/
-
-    # Drop systemd's in-memory references to the now-removed units
-    systemctl daemon-reload
-
-    # Uninstall local Elastic Agent, if installed
-    elastic-agent uninstall -f
+    rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/

     if command -v docker &> /dev/null; then
       # Stop and remove all so-* containers so files can be changed with more safety
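One side of this hunk stops each salt service in the background and polls with a retry budget; the other stops it in the foreground and then sweeps up leftover processes with `pkill`. A minimal sketch of the bounded-wait variant, using a placeholder unit name and a short timeout (the real script's helper functions and limits differ):

```bash
#!/bin/bash
# Stop a unit in the background and give up after a bounded number of checks.
service=${1:-crond}        # placeholder unit for demonstration
retry_limit=12             # roughly one minute at 5-second intervals

systemctl stop "$service" &
stop_pid=$!

count=0
while systemctl is-active --quiet "$service"; do
  if [ "$count" -gt "$retry_limit" ]; then
    echo "could not stop $service in time" >&2
    kill -9 "$stop_pid" 2>/dev/null
    exit 1
  fi
  sleep 5
  ((count++))
done
echo "$service stopped"
```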
@@ -1634,7 +1619,10 @@ reinstall_init() {
     backup_dir /nsm/hydra "$date_string"
     backup_dir /nsm/influxdb "$date_string"

-  } 2>&1 | tee -a "$setup_log"
+    # Uninstall local Elastic Agent, if installed
+    elastic-agent uninstall -f
+
+  } >> "$setup_log" 2>&1

   info "System reinstall init has been completed."
 }
+1 -1

@@ -219,7 +219,7 @@ if [ -n "$test_profile" ]; then
   WEBUSER=onionuser@somewhere.invalid
   WEBPASSWD1=0n10nus3r
   WEBPASSWD2=0n10nus3r
-  NODE_DESCRIPTION="${HOSTNAME} - ${install_type} - ${MSRVIP_OFFSET}"
+  NODE_DESCRIPTION="${HOSTNAME} - ${install_type} - ${MAINIP}"

   update_sudoers_for_testing
 fi