securityonion/salt/postgres/schema_pillar.sls
Mike Reeves 92a7bb3053 fix: get postsalt's PG-canonical pillar actually working end-to-end
Five blockers turned up the first time the so_pillar schema was applied
against a fresh standalone install. Fixing them in order:

1. 006_rls.sql ordering bug
   006 GRANTed on so_pillar.change_queue and its sequence, but the table
   isn't created until 008_change_notify.sql. 006 errored mid-file with
   "relation so_pillar.change_queue does not exist", short-circuiting the
   rest of the pillar staging chain. Moved the three change_queue grants
   into 008 alongside the table creation so each file is self-contained.
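   A sketch of the relocated grants (grantee split and sequence name are
   illustrative; the authoritative statements sit in 008 next to the
   CREATE TABLE):

       GRANT INSERT ON so_pillar.change_queue TO so_pillar_writer;
       GRANT SELECT, DELETE ON so_pillar.change_queue TO so_pillar_master;
       GRANT USAGE ON SEQUENCE so_pillar.change_queue_id_seq TO so_pillar_writer;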

2. so_pillar_* roles unable to log in
   006 created the roles as NOLOGIN and set no password. Salt-master's
   ext_pillar (postgres) and the pg_notify_pillar engine both connect as
   so_pillar_master via TCP, so both came up with "password authentication
   failed for user so_pillar_master". Added a templated cmd.run step in
   schema_pillar.sls (so_pillar_role_login_passwords) that ALTERs all three
   roles WITH LOGIN PASSWORD pulling from secrets:pillar_master_pass — the
   same password ext_pillar_postgres.conf.jinja and the engines.conf
   pg_notify_pillar block render with.
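   Shape of the fix, per role (placeholder shown here; the state below
   renders the real value from pillar):

       ALTER ROLE so_pillar_master WITH LOGIN PASSWORD '<secrets:pillar_master_pass>';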

3. Missing GRANT CONNECT ON DATABASE securityonion
   USAGE on the schema is granted in 006 but CONNECT on the database isn't.
   Engine + ext_pillar succeeded auth then died with "permission denied
   for database securityonion". Added the explicit GRANT CONNECT in 006.
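   The missing statement, roughly:

       GRANT CONNECT ON DATABASE securityonion TO so_pillar_master;
       -- and likewise for any other so_pillar_* role that connects over TCP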

4. psycopg2 missing from salt's bundled python
   /opt/saltstack/salt/bin/python3 doesn't ship psycopg by default, so
   when salt-master tries to load the pg_notify_pillar engine its
   `import psycopg2` fails inside salt's loader and the engine silently
   doesn't start (no error in the salt log — you only notice when nothing
   ever drains so_pillar.change_queue). Added a pip.installed state in
   schema_pillar.sls bound to that interpreter via bin_env.
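   A quick way to confirm the interpreter can see the module afterwards:

       /opt/saltstack/salt/bin/python3 -c 'import psycopg2; print(psycopg2.__version__)'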

5. engines.conf vs pg_notify_pillar_engine.conf list-replace
   Salt's master.d/*.conf merge replaces top-level lists rather than
   concatenating them. The engine config used to live in its own
   master.d/pg_notify_pillar_engine.conf with `engines: [pg_notify_pillar]`
   alongside the legacy `engines.conf` carrying `engines: [checkmine,
   pillarWatch]`. Whichever loaded last won, so the engine never showed
   up in the loaded set even when the file existed. Folded the
   pg_notify_pillar declaration into engines.conf (now jinja-rendered,
   gated on postgres:so_pillar:enabled), dropped the standalone state from
   pg_notify_pillar_engine.sls, and deleted the now-orphaned conf jinja.
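   The merged engines.conf now renders along these lines (gate shown
   schematically):

       engines:
         - checkmine
         - pillarWatch
       {%- if salt['pillar.get']('postgres:so_pillar:enabled', False) %}
         - pg_notify_pillar
       {%- endif %}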

End state validated against a live standalone-net install on the dev rig:
salt-master ext_pillar reads from so_pillar.* with no errors, the
pg_notify_pillar engine LISTENs on so_pillar_change and drains the
change_queue (134-row backlog → 0 within seconds), and a so-yaml replace
on a pillar key flows disk → PG → ext_pillar → salt pillar.get with the
new value visible after a saltutil.refresh_pillar.
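For reference, the read-back half of that check (placeholder in angle
brackets):

    salt '*' saltutil.refresh_pillar
    salt '*' pillar.get <changed:key>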
2026-05-04 19:47:38 -04:00


# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if 'postgres' in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Deploys the so_pillar schema (tables, views, audit triggers, secrets,
# RLS, pg_cron retention) inside the so-postgres container. Idempotent —
# every CREATE / GRANT is wrapped in IF NOT EXISTS / ON CONFLICT or DO
# blocks so re-running the state is a no-op when the schema is current.
#
# Gated on the postgres:so_pillar:enabled feature flag (default false).
# Flip to true once the postsalt branch is ready to bring ext_pillar live.
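#
# Minimal pillar sketch to enable it (keys mirror the pillar.get calls below):
#
#   postgres:
#     so_pillar:
#       enabled: true
#       drift_check_enabled: false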
include:
  - postgres.enabled
{% set so_pillar_enabled = salt['pillar.get']('postgres:so_pillar:enabled', False) %}
{% if so_pillar_enabled %}
{% set drift_enabled = salt['pillar.get']('postgres:so_pillar:drift_check_enabled', False) %}
{% set schema_dir = '/opt/so/saltstack/default/salt/postgres/files/schema/pillar' %}
# Wait for postgres to actually accept TCP connections. Same idiom as
# telegraf_users.sls. The docker_container.running state returns before the
# database is ready to serve queries on first init.
so_pillar_postgres_wait_ready:
  cmd.run:
    - name: |
        for i in $(seq 1 60); do
          if docker exec so-postgres pg_isready -h 127.0.0.1 -U postgres -q 2>/dev/null; then
            exit 0
          fi
          sleep 2
        done
        echo "so-postgres did not accept TCP connections within 120s" >&2
        exit 1
    - require:
      - docker_container: so-postgres
{% set sql_files = [
    '001_schema.sql',
    '002_views.sql',
    '003_history_trigger.sql',
    '004_secrets.sql',
    '005_seed_roles.sql',
    '006_rls.sql',
] %}
{% if drift_enabled %}
{% do sql_files.append('007_drift_pgcron.sql') %}
{% endif %}
# 008 always applies — pg_notify-driven change fan-out is what the salt-master
# pg_notify_pillar engine consumes. Without it reactor wiring sees no events.
{% do sql_files.append('008_change_notify.sql') %}
{% for sql_file in sql_files %}
so_pillar_apply_{{ sql_file | replace('.', '_') }}:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion \
          < {{ schema_dir }}/{{ sql_file }}
    - require:
      - cmd: so_pillar_postgres_wait_ready
{% if not loop.first %}
      - cmd: so_pillar_apply_{{ sql_files[loop.index0 - 1] | replace('.', '_') }}
{% endif %}
{% endfor %}
# Set the master encryption key GUC on the secret-owner role. The key itself
# is generated by setup/so-functions::secrets_pillar() (extended for postsalt)
# and lives in /opt/so/conf/postgres/so_pillar.key (mode 0400) — never read by
# Salt itself; the value flows into PG via ALTER ROLE so it sits only in the
# server's role catalog.
so_pillar_master_key_configure:
  cmd.run:
    - name: |
        if [ -r /opt/so/conf/postgres/so_pillar.key ]; then
          KEY="$(< /opt/so/conf/postgres/so_pillar.key)"
          docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion <<EOSQL
        ALTER ROLE so_pillar_secret_owner SET so_pillar.master_key = '$KEY';
        ALTER ROLE so_pillar_master SET so_pillar.master_key = '$KEY';
        ALTER ROLE so_pillar_writer SET so_pillar.master_key = '$KEY';
        EOSQL
        else
          echo "so_pillar.key not present yet; setup/so-functions must generate it before schema_pillar.sls" >&2
          exit 1
        fi
    - require:
      - cmd: so_pillar_apply_{{ sql_files[-1] | replace('.', '_') }}
# Set login passwords on the so_pillar_* roles. 006_rls.sql creates the roles
# as NOLOGIN with no password (plain SQL can't substitute pillar values), so
# the salt-master ext_pillar and the pg_notify_pillar engine — both of which
# connect as so_pillar_master via TCP — would fail with "password
# authentication failed" without this step. The password lives in pillar
# under secrets:pillar_master_pass (generated by setup/so-functions::secrets_pillar)
# and is the same one rendered into ext_pillar_postgres.conf.jinja and the
# engines.conf pg_notify_pillar block, so all three sides agree.
so_pillar_role_login_passwords:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion <<EOSQL
        ALTER ROLE so_pillar_master WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        ALTER ROLE so_pillar_writer WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        ALTER ROLE so_pillar_secret_owner WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        EOSQL
    - require:
      - cmd: so_pillar_master_key_configure
# Install psycopg2 into salt-master's bundled python so the pg_notify_pillar
# engine module can `import psycopg2`. Without this the engine's import fails
# silently in salt's loader and the engine just never starts. salt's bundled
# python at /opt/saltstack/salt/bin/python3 doesn't ship psycopg by default.
so_pillar_psycopg2_in_salt_python:
  pip.installed:
    - name: psycopg2-binary
    - bin_env: /opt/saltstack/salt/bin/python3
    - require:
      - cmd: so_pillar_role_login_passwords
# Run the importer once after the schema is in place. Idempotent — re-runs
# with no SLS edits produce zero row changes.
so_pillar_initial_import:
  cmd.run:
    - name: /usr/sbin/so-pillar-import --yes --reason 'schema_pillar.sls initial import'
    - require:
      - pip: so_pillar_psycopg2_in_salt_python
# Flip so-yaml from dual-write to PG-canonical for managed paths now that
# the schema and importer are both in place. Bootstrap files (secrets.sls,
# postgres/auth.sls, ca/init.sls, *.nodes.sls, top.sls, ...) remain on disk
# regardless because so_yaml_postgres.locate() raises SkipPath for them.
so_pillar_so_yaml_mode_dir:
  file.directory:
    - name: /opt/so/conf/so-yaml
    - user: socore
    - group: socore
    - mode: '0755'
    - makedirs: True

so_pillar_so_yaml_mode_postgres:
  file.managed:
    - name: /opt/so/conf/so-yaml/mode
    - contents: postgres
    - user: socore
    - group: socore
    - mode: '0644'
    - require:
      - file: so_pillar_so_yaml_mode_dir
      - cmd: so_pillar_initial_import
{% else %}
so_pillar_disabled_noop:
  test.nop
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed
{% endif %}