Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2026-05-10 05:12:54 +02:00 (commit 3d11694d51).
Two coupled changes that together let so_pillar.* be the canonical config store, with config edits driving service reloads automatically: so-yaml PG-canonical mode - Adds /opt/so/conf/so-yaml/mode (and SO_YAML_BACKEND env override) with three values: dual (legacy), postgres (PG-only for managed paths), disk (emergency rollback). Bootstrap files (secrets.sls, ca/init.sls, *.nodes.sls, top.sls, ...) stay disk-only regardless via the existing SkipPath allowlist in so_yaml_postgres.locate. - loadYaml/writeYaml/purgeFile now route to so_pillar.* in postgres mode: replace/add/get all read+write the database with no disk file ever appearing. PG failure is fatal in postgres mode (no silent fallback); dual mode preserves the prior best-effort mirror. - so_yaml_postgres gains read_yaml(path), is_pg_managed(path), and is_enabled() so so-yaml can answer "is this path PG-managed and is PG up" without reaching into private helpers. - schema_pillar.sls writes /opt/so/conf/so-yaml/mode = postgres after the importer succeeds, so flipping postgres:so_pillar:enabled flips so-yaml's behavior in lockstep with the schema being live. pg_notify-driven change fan-out - 008_change_notify.sql adds so_pillar.change_queue + an AFTER trigger on pillar_entry that enqueues the locator and pg_notifies 'so_pillar_change'. Queue is drained at-least-once so engine restarts don't lose events; pg_notify is just the wakeup signal. - New salt-master engine pg_notify_pillar.py LISTENs on the channel, drains the queue with FOR UPDATE SKIP LOCKED, debounces bursts, and fires 'so/pillar/changed' events grouped by (scope, role, minion). - Reactor so_pillar_changed.sls catches the tag and dispatches to orch.so_pillar_reload, which carries a DISPATCH map of pillar-path prefix -> (state sls, role grain set) so adding a new service to the auto-reload list is a one-line edit instead of a new reactor. 
- Engine + reactor wiring is gated on the same postgres:so_pillar:enabled flag as the schema and ext_pillar config so the whole stack flips on/off together. Tests: 21 new cases (112 total, all passing) covering mode resolution, PG-managed detection, and PG-canonical read/write/purge routing with the PG client stubbed.
91 lines
2.6 KiB
Plaintext
91 lines
2.6 KiB
Plaintext
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

include:
  - salt.minion
  - salt.master.ext_pillar_postgres
  - salt.master.pg_notify_pillar_engine
{% if 'vrt' in salt['pillar.get']('features', []) %}
  - salt.cloud
  - salt.cloud.reactor_config_hypervisor

# vrt only: push any new/updated runner modules to the master so the
# salt.cloud states included above can rely on them in this same run.
sync_runners:
  salt.runner:
    - name: saltutil.sync_runners
{% endif %}
|
# Deploy the checkmine engine onto the master. salt_master_service
# watches this state and restarts the master when the file changes.
checkmine_engine:
  file.managed:
    - name: /etc/salt/engines/checkmine.py
    - source: salt://salt/engines/master/checkmine.py
    - makedirs: True
|
# Deploy the pillarWatch engine. salt_master_service watches this state
# and restarts the master when the file changes.
# makedirs ensures /etc/salt/engines exists even if this state runs
# before (or without) checkmine_engine, matching that state's options.
pillarWatch_engine:
  file.managed:
    - name: /etc/salt/engines/pillarWatch.py
    - source: salt://salt/engines/master/pillarWatch.py
    - makedirs: True
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
# vrt only: master config enabling the virtualization engines; restart
# the master when it changes.
vrt_engine_config:
  file.managed:
    - name: /etc/salt/master.d/vrt_engine.conf
    - source: salt://salt/files/vrt_engine.conf
    - watch_in:
      - service: salt_master_service

# makedirs added on the two engine deployments below so they do not
# depend on checkmine_engine having already created /etc/salt/engines.
virtual_node_manager_engine:
  file.managed:
    - name: /etc/salt/engines/virtual_node_manager.py
    - source: salt://salt/engines/master/virtual_node_manager.py
    - makedirs: True
    - watch_in:
      - service: salt_master_service

virtual_power_manager_engine:
  file.managed:
    - name: /etc/salt/engines/virtual_power_manager.py
    - source: salt://salt/engines/master/virtual_power_manager.py
    - makedirs: True
    - watch_in:
      - service: salt_master_service
{% endif %}
|
# Engine registration for the salt-master; salt_master_service watches
# this state and restarts the master when the file changes.
engines_config:
  file.managed:
    - name: /etc/salt/master.d/engines.conf
    - source: salt://salt/files/engines.conf
|
# Update the bootstrap script used by salt-cloud when deploying minions.
# show_changes is disabled because the script is large and the diff is noise.
# NOTE(review): the python3.10 site-packages path is hard-coded and would
# silently stop matching on a Salt build shipping a different Python — confirm.
salt_bootstrap_cloud:
  file.managed:
    - name: /opt/saltstack/salt/lib/python3.10/site-packages/salt/cloud/deploy/bootstrap-salt.sh
    - source: salt://salt/scripts/bootstrap-salt.sh
    - show_changes: False
|
# Keep salt-master enabled and running, restarting it whenever any of the
# engine files or the engines config change. order: last defers the
# restart until everything else in this run has been applied.
salt_master_service:
  service.running:
    - name: salt-master
    - enable: True
    - watch:
      - file: checkmine_engine
      - file: pillarWatch_engine
      - file: engines_config
    - order: last
|
{% else %}

# Guard branch: fail loudly when this sls is applied on a node where
# allowed_states does not permit it.
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}