mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-05-06 19:38:51 +02:00
Compare commits
20 Commits
- 652ac5d61f
- f888a2ba6b
- 8a1ee02335
- 192f6cfe13
- 5bca81d833
- 1c6574c694
- b701664e04
- bc64f1431d
- 2203037ce7
- 77a4ad877e
- 702b3585cc
- 86966d2778
- ce3ad3a895
- 3a4b7b50de
- 39d0947102
- 0085d9a353
- 2f01ce3b23
- 71b19c1b5f
- 82f70bb53a
- 2dcded6cca
@@ -227,7 +227,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. (installed as so_elastic / so_kibana)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk|cyera|island_browser).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. This error should not be seen on fresh ES 9.3.3 installs or after SO 3.1.0 with soups addition of check_transform_health_and_reauthorize()
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
fi

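
The updated exclusion relies on an ERE back-reference: `\1` forces the index named in the permissions error to match the integration matched earlier in the line, so the pattern only suppresses that specific self-referential transform error. A standalone sketch of the behavior (the log line below is invented for illustration, and it is assumed here that `EXCLUDED_ERRORS` is ultimately applied as an extended-regex match):

```python
import re

# Hypothetical log line, for illustration only; real reporter output differs.
line = ("[TransformTask] [logs-tychon.endpoint-default-0.1.0] failed: "
        "user so_kibana lacks the required permissions [logs-tychon.endpoint-default]")

# Same pattern as the exclusion above; \1 ties the index prefix in the
# permissions error back to the integration named earlier in the line.
pattern = (r"TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|"
           r"o365_metrics|microsoft_sentinel|snyk|cyera|island_browser).*"
           r"user so_kibana lacks the required permissions \[(logs|metrics)-\1")

print(bool(re.search(pattern, line)))  # True -> the line would be excluded
```
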
@@ -51,6 +51,16 @@ so-elastic-fleet-package-registry:
        - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
        {% endfor %}
        {% endif %}

wait_for_so-elastic-fleet-package-registry:
  http.wait_for_successful_query:
    - name: "http://localhost:8080/health"
    - status: 200
    - wait_for: 300
    - request_interval: 15
    - require:
      - docker_container: so-elastic-fleet-package-registry

delete_so-elastic-fleet-package-registry_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf

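
The new `wait_for_so-elastic-fleet-package-registry` state blocks until the package registry's health endpoint answers with HTTP 200, polling every 15 seconds for up to 300 seconds. A rough standalone equivalent in Python (the function name and use of `urllib` are illustrative, not part of the change):

```python
import time
import urllib.error
import urllib.request

def wait_for_health(url="http://localhost:8080/health", timeout=300, interval=15):
    # Poll the registry health endpoint until it returns 200 or the deadline passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=10) as resp:
                if resp.status == 200:
                    return True
        except (urllib.error.URLError, OSError):
            pass  # registry container not ready yet
        time.sleep(interval)
    return False
```
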
@@ -18,17 +18,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
    - retry:
        attempts: 4
        interval: 30

{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
    - retry:
        attempts: 4
        interval: 30
    - onchanges:
      - x509: etc_elasticfleet_logstash_crt
      - x509: elasticfleet_kafka_crt
{% endif %}

# If enabled, automatically update Fleet Server URLs & ES Connection

@@ -240,7 +240,7 @@ elastic_fleet_policy_create() {
    --arg DESC "$DESC" \
    --arg TIMEOUT $TIMEOUT \
    --arg FLEETSERVER "$FLEETSERVER" \
    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER,"advanced_settings":{"agent_logging_level": "warning"}}'
  )
  # Create Fleet Policy
  if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then

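
For context, the only change to the jq program is the added `advanced_settings` key. A sketch of the resulting payload with made-up argument values (note that `jq --arg` binds every value as a string, so `inactivity_timeout` and `has_fleet_server` stay JSON strings here as well):

```python
import json

# Hypothetical argument values, for illustration only.
NAME, DESC, TIMEOUT, FLEETSERVER = "so-grid-nodes_general", "SO grid nodes", "1209600", "false"

payload = {
    "name": NAME, "id": NAME, "description": DESC, "namespace": "default",
    "monitoring_enabled": ["logs"], "inactivity_timeout": TIMEOUT,
    "has_fleet_server": FLEETSERVER,
    "advanced_settings": {"agent_logging_level": "warning"},
}
print(json.dumps(payload))
```
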
@@ -485,6 +485,130 @@ elasticsearch_backup_index_templates() {
  tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
}

elasticfleet_set_agent_logging_level_warn() {
  . /usr/sbin/so-elastic-fleet-common

  local current_agent_policies
  if ! current_agent_policies=$(fleet_api "agent_policies?perPage=1000"); then
    echo "Warning: unable to retrieve Fleet agent policies"
    return 0
  fi

  # Only updating policies that are within Security Onion defaults and do not already have any user configured advanced_settings.
  local policies_to_update
  policies_to_update=$(jq -c '
    .items[]
    | select(has("advanced_settings") | not)
    | select(
        .id == "so-grid-nodes_general"
        or .id == "so-grid-nodes_heavy"
        or .id == "endpoints-initial"
        or (.id | startswith("FleetServer_"))
      )
  ' <<< "$current_agent_policies")

  if [[ -z "$policies_to_update" ]]; then
    return 0
  fi

  while IFS= read -r policy; do
    [[ -z "$policy" ]] && continue

    local policy_id policy_name policy_namespace
    policy_id=$(jq -r '.id' <<< "$policy")
    policy_name=$(jq -r '.name' <<< "$policy")
    policy_namespace=$(jq -r '.namespace' <<< "$policy")

    local update_logging
    update_logging=$(jq -n \
      --arg name "$policy_name" \
      --arg namespace "$policy_namespace" \
      '{name: $name, namespace: $namespace, advanced_settings: {agent_logging_level: "warning"}}'
    )

    echo "Setting elastic agent_logging_level to warning on policy '$policy_name' ($policy_id)."
    if ! fleet_api "agent_policies/$policy_id" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$update_logging" >/dev/null; then
      echo " warning: failed to update agent policy '$policy_name' ($policy_id)" >&2
    fi
  done <<< "$policies_to_update"
}

check_transform_health_and_reauthorize() {
  . /usr/sbin/so-elastic-fleet-common

  echo "Checking integration transform jobs for unhealthy / unauthorized status..."

  local transforms_doc stats_doc installed_doc
  if ! transforms_doc=$(so-elasticsearch-query "_transform/_all?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
    echo "Unable to query for transform jobs, skipping reauthorization."
    return 0
  fi
  if ! stats_doc=$(so-elasticsearch-query "_transform/_all/_stats?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
    echo "Unable to query for transform job stats, skipping reauthorization."
    return 0
  fi
  if ! installed_doc=$(fleet_api "epm/packages/installed?perPage=500"); then
    echo "Unable to list installed Fleet packages, skipping reauthorization."
    return 0
  fi

  # Get all transforms that meet the following
  # - unhealthy (any non-green health status)
  # - metadata has run_as_kibana_system: false (this fix is specific to transforms started prior to Kibana 9.3.3)
  # - are not orphaned (integration is not somehow missing/corrupt/uninstalled)
  local unhealthy_transforms
  unhealthy_transforms=$(jq -c -n \
    --argjson t "$transforms_doc" \
    --argjson s "$stats_doc" \
    --argjson i "$installed_doc" '
    ($i.items | map({key: .name, value: .version}) | from_entries) as $pkg_ver
    | ($s.transforms | map({key: .id, value: .health.status}) | from_entries) as $health
    | [ $t.transforms[]
        | select(._meta.run_as_kibana_system == false)
        | select(($health[.id] // "unknown") != "green")
        | {id, pkg: ._meta.package.name, ver: ($pkg_ver[._meta.package.name])}
      ]
    | if length == 0 then empty else . end
    | (map(select(.ver == null)) | map({orphan: .id})[]),
      (map(select(.ver != null))
        | group_by(.pkg)
        | map({pkg: .[0].pkg, ver: .[0].ver, transformIds: map(.id)})[])
  ')

  if [[ -z "$unhealthy_transforms" ]]; then
    return 0
  fi

  local unhealthy_count
  unhealthy_count=$(jq -s '[.[].transformIds? // empty | .[]] | length' <<< "$unhealthy_transforms")
  echo "Found $unhealthy_count transform(s) needing reauthorization."

  local total_failures=0
  while IFS= read -r transform; do
    [[ -z "$transform" ]] && continue
    if jq -e 'has("orphan")' <<< "$transform" >/dev/null 2>&1; then
      echo "Skipping transform not owned by any installed Fleet package: $(jq -r '.orphan' <<< "$transform")"
      continue
    fi

    local pkg ver body resp
    pkg=$(jq -r '.pkg' <<< "$transform")
    ver=$(jq -r '.ver' <<< "$transform")
    body=$(jq -c '{transforms: (.transformIds | map({transformId: .}))}' <<< "$transform")

    echo "Reauthorizing transform(s) for ${pkg}-${ver}..."
    resp=$(fleet_api "epm/packages/${pkg}/${ver}/transforms/authorize" \
      -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
      -d "$body") || { echo "Could not reauthorize transform(s) for ${pkg}-${ver}"; continue; }

    (( total_failures += $(jq 'map(select(.success != true)) | length' <<< "$resp" 2>/dev/null) ))
  done <<< "$unhealthy_transforms"

  if [[ "$total_failures" -gt 0 ]]; then
    echo "Some transform(s) failed to reauthorize."
  fi
}

ensure_postgres_local_pillar() {
  # Postgres was added as a service after 3.0.0, so the new pillar/top.sls
  # references postgres.soc_postgres / postgres.adv_postgres unconditionally.

@@ -553,6 +677,12 @@ post_to_3.1.0() {
  # file_roots of its own and --local would fail with "No matching sls found".
  salt-call state.apply postgres.telegraf_users queue=True || true

  # Update default agent policies to use logging level warn.
  elasticfleet_set_agent_logging_level_warn || true

  # Check for unhealthy / unauthorized integration transform jobs and attempt reauthorizations
  check_transform_health_and_reauthorize || true

  POSTVERSION=3.1.0
}

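
The jq filter in `check_transform_health_and_reauthorize` emits newline-delimited JSON in two shapes: `{orphan: ...}` records for transforms whose owning package is no longer installed (skipped by the loop), and one grouped record per still-installed package (reauthorized via the Fleet API). A sketch with invented IDs showing how the loop turns a grouped record into the authorize request body:

```python
import json

# Hypothetical examples of the records the jq filter can emit.
records = [
    {"orphan": "logs-tychon.endpoint-default-0.1.0"},
    {"pkg": "tychon", "ver": "1.2.0",
     "transformIds": ["logs-tychon.endpoint-default-0.1.0"]},
]

for rec in records:
    if "orphan" in rec:
        print("skipping orphaned transform:", rec["orphan"])
        continue
    # Mirrors: jq -c '{transforms: (.transformIds | map({transformId: .}))}'
    body = {"transforms": [{"transformId": t} for t in rec["transformIds"]]}
    print(f'POST epm/packages/{rec["pkg"]}/{rec["ver"]}/transforms/authorize {json.dumps(body)}')
```
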
@@ -3,7 +3,14 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set hypervisor = pillar.minion_id %}
{% set hypervisor = pillar.get('minion_id', '') %}

{% if not hypervisor|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
{% do salt.log.error('delete_hypervisor_orch: refusing unsafe minion_id=' ~ hypervisor) %}
delete_hypervisor_invalid_minion_id:
  test.fail_without_changes:
    - name: delete_hypervisor_invalid_minion_id
{% else %}

ensure_hypervisor_mine_deleted:
  salt.function:
@@ -20,3 +27,5 @@ update_salt_cloud_profile:
    - sls:
      - salt.cloud.config
    - concurrent: True

{% endif %}

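
Both orchestration files now refuse IDs that fall outside a conservative character class before interpolating them into state IDs and file paths. The same pattern exercised in a standalone snippet (the candidate IDs are made up):

```python
import re

# Same character class and length cap as the regex_match guards above.
SAFE_ID = re.compile(r'^[A-Za-z0-9._-]{1,253}$')

for candidate in ["hv01_hypervisor", "sensor-1.example.local", "../../etc/passwd", "a b; rm -rf /"]:
    verdict = "accepted" if SAFE_ID.match(candidate) else "refused"
    print(f"{candidate!r}: {verdict}")
```
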
@@ -12,7 +12,14 @@
{% if 'vrt' in salt['pillar.get']('features', []) %}

{% do salt.log.debug('vm_pillar_clean_orch: Running') %}
{% set vm_name = pillar.get('vm_name') %}
{% set vm_name = pillar.get('vm_name', '') %}

{% if not vm_name|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
{% do salt.log.error('vm_pillar_clean_orch: refusing unsafe vm_name=' ~ vm_name) %}
vm_pillar_clean_invalid_name:
  test.fail_without_changes:
    - name: vm_pillar_clean_invalid_name
{% else %}

delete_adv_{{ vm_name }}_pillar:
  module.run:
@@ -24,6 +31,8 @@ delete_{{ vm_name }}_pillar:
    - file.remove:
      - path: /opt/so/saltstack/local/pillar/minions/{{ vm_name }}.sls

{% endif %}

{% else %}

{% do salt.log.error(

@@ -3,12 +3,15 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% if data['id'].endswith('_hypervisor') and data['result'] == True %}
{% set hid = data['id'] %}
{% if hid|regex_match('^([A-Za-z0-9._-]{1,253})$')
   and hid.endswith('_hypervisor')
   and data['result'] == True %}

{% if data['act'] == 'accept' %}
check_and_trigger:
  runner.setup_hypervisor.setup_environment:
    - minion_id: {{ data['id'] }}
    - minion_id: {{ hid }}
{% endif %}

{% if data['act'] == 'delete' %}
@@ -17,8 +20,7 @@ delete_hypervisor:
    - args:
      - mods: orch.delete_hypervisor
    - pillar:
        minion_id: {{ data['id'] }}
        minion_id: {{ hid }}
{% endif %}

{% endif %}

@@ -1,7 +1,7 @@
#!py

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

@@ -9,30 +9,42 @@ import logging
import os
import pwd
import grp
import re

log = logging.getLogger(__name__)

PILLAR_ROOT = '/opt/so/saltstack/local/pillar/minions/'
_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')


def run():
    vm_name = data['kwargs']['name']
    logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
    pillar_root = '/opt/so/saltstack/local/pillar/minions/'
    vm_name = data.get('kwargs', {}).get('name', '')
    if not _VMNAME_RE.match(str(vm_name)):
        log.error("createEmptyPillar reactor: refusing unsafe vm_name=%r", vm_name)
        return {}

    log.info("createEmptyPillar reactor: vm_name: %s", vm_name)
    pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']

    try:
        # Get socore user and group IDs
        socore_uid = pwd.getpwnam('socore').pw_uid
        socore_gid = grp.getgrnam('socore').gr_gid
        pillar_root_real = os.path.realpath(PILLAR_ROOT)

        for f in pillar_files:
            full_path = pillar_root + f
            if not os.path.exists(full_path):
                # Create empty file
                os.mknod(full_path)
                # Set ownership to socore:socore
                os.chown(full_path, socore_uid, socore_gid)
                # Set mode to 644 (rw-r--r--)
                os.chmod(full_path, 0o640)
                logging.error("createEmptyPillar reactor: created %s with socore:socore ownership and mode 644" % f)
            full_path = os.path.join(PILLAR_ROOT, f)
            resolved = os.path.realpath(full_path)
            if os.path.dirname(resolved) != pillar_root_real:
                log.error("createEmptyPillar reactor: refusing path outside pillar root: %s", resolved)
                continue
            if os.path.exists(resolved):
                continue
            os.mknod(resolved)
            os.chown(resolved, socore_uid, socore_gid)
            os.chmod(resolved, 0o640)
            log.info("createEmptyPillar reactor: created %s with socore:socore ownership and mode 0640", f)

    except (KeyError, OSError) as e:
        logging.error("createEmptyPillar reactor: Error setting ownership/permissions: %s" % str(e))
        log.error("createEmptyPillar reactor: Error setting ownership/permissions: %s", e)

    return {}

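
The reworked reactor resolves each target with `os.path.realpath` and only creates it if it sits directly inside the pillar root; the name regex above already rejects `/`, so this check is defense in depth. A small sketch of the containment test in isolation (the file names are invented):

```python
import os

PILLAR_ROOT = '/opt/so/saltstack/local/pillar/minions/'
pillar_root_real = os.path.realpath(PILLAR_ROOT)

# Hypothetical file names, for illustration only.
for f in ['vm01.sls', 'adv_vm01.sls', '../../../etc/cron.d/evil.sls']:
    resolved = os.path.realpath(os.path.join(PILLAR_ROOT, f))
    inside = os.path.dirname(resolved) == pillar_root_real
    print(f, 'inside pillar root' if inside else 'refused')
```
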
+33 -11
@@ -1,18 +1,40 @@
#!py

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

remove_key:
  wheel.key.delete:
    - args:
      - match: {{ data['name'] }}
import logging
import re

{{ data['name'] }}_pillar_clean:
  runner.state.orchestrate:
    - args:
      - mods: orch.vm_pillar_clean
      - pillar:
          vm_name: {{ data['name'] }}
log = logging.getLogger(__name__)

{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}
_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')


def run():
    name = data.get('name', '')
    if not _VMNAME_RE.match(str(name)):
        log.error("deleteKey reactor: refusing unsafe name=%r", name)
        return {}

    log.info("deleteKey reactor: deleted minion key: %s", name)

    return {
        'remove_key': {
            'wheel.key.delete': [
                {'args': [
                    {'match': name},
                ]},
            ],
        },
        '%s_pillar_clean' % name: {
            'runner.state.orchestrate': [
                {'args': [
                    {'mods': 'orch.vm_pillar_clean'},
                    {'pillar': {'vm_name': name}},
                ]},
            ],
        },
    }

@@ -24,11 +24,6 @@

{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}

{% if GLOBALS.postgres is defined and GLOBALS.postgres.auth is defined %}
{% set PG_ADMIN_PASS = salt['pillar.get']('secrets:postgres_pass', '') %}
{% do SOCDEFAULTS.soc.config.server.modules.update({'postgres': {'hostUrl': GLOBALS.manager_ip, 'port': 5432, 'username': GLOBALS.postgres.auth.users.so_postgres_user.user, 'password': GLOBALS.postgres.auth.users.so_postgres_user.pass, 'adminUser': 'postgres', 'adminPassword': PG_ADMIN_PASS, 'dbname': 'securityonion', 'sslMode': 'require', 'assistantEnabled': true, 'esHostUrl': 'https://' ~ GLOBALS.manager_ip ~ ':9200', 'esUsername': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'esPassword': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass, 'esVerifyCert': false}}) %}
{% endif %}

{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'token': INFLUXDB_TOKEN}) %}
{% for tool in SOCDEFAULTS.soc.config.server.client.tools %}

@@ -15,7 +15,7 @@ from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

with open("/opt/so/conf/strelka/filecheck.yaml", "r") as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.Loader)
    cfg = yaml.safe_load(ymlfile)

extract_path = cfg["filecheck"]["extract_path"]
historypath = cfg["filecheck"]["historypath"]

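
`yaml.safe_load` builds only plain Python types, whereas `yaml.load(..., Loader=yaml.Loader)` can construct arbitrary Python objects from tagged nodes. A short illustration (the config snippet is invented, not the shipped filecheck.yaml):

```python
import yaml

doc = "filecheck:\n  extract_path: /var/tmp/extracted\n"

# safe_load yields plain dicts/strings, which is all the filecheck config needs.
cfg = yaml.safe_load(doc)
print(cfg["filecheck"]["extract_path"])

# A document carrying a python/object tag is rejected by safe_load, whereas the
# unsafe Loader would attempt to construct the object.
hostile = "!!python/object/apply:os.system ['id']"
try:
    yaml.safe_load(hostile)
except yaml.YAMLError as exc:
    print("rejected:", type(exc).__name__)
```
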
@@ -1701,6 +1701,24 @@ remove_package() {
  fi
}

ensure_pyyaml() {
  title "Ensuring python3-pyyaml is installed"
  if rpm -q python3-pyyaml >/dev/null 2>&1; then
    info "python3-pyyaml already installed"
    return 0
  fi
  info "python3-pyyaml not found, attempting to install"
  set -o pipefail
  dnf -y install python3-pyyaml 2>&1 | tee -a "$setup_log"
  local result=$?
  set +o pipefail
  if [[ $result -ne 0 ]] || ! rpm -q python3-pyyaml >/dev/null 2>&1; then
    error "Failed to install python3-pyyaml (exit=$result)"
    fail_setup
  fi
  info "python3-pyyaml installed successfully"
}

# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml
# CAUTION! SALT VERSION UPDATES - READ BELOW
# When updating the salt version, also update the version in:

@@ -66,6 +66,9 @@ set_timezone
# Let's see what OS we are dealing with here
detect_os

# Ensure python3-pyyaml is available before any code that may need so-yaml/PyYAML
ensure_pyyaml


# Check to see if this is the setup type of "desktop".
is_desktop=