Files
securityonion/salt/postgres/schema_pillar.sls
T
Mike Reeves a7efabd90d fix: tolerate pip's non-zero exit on psycopg2 patchelf step
salt's pip.installed flagged so_pillar_psycopg2_in_salt_python as
failed because pip exits non-zero when it can't find the patchelf
binary to rewrite the psycopg2 wheel's RPATH after extraction. The
wheel is fully installed and importable regardless — the patchelf
step is a cosmetic post-install rewrite, not a build dependency. But
salt's failure cascade then short-circuited so_pillar_initial_import
and the so-yaml mode flip, leaving the install in dual-pillar mode
instead of PG-canonical.

Replaced with cmd.run that runs pip with `|| true` and uses an
`import psycopg2` check as the actual readiness gate — same idea as
how salt's own bootstrap does it. Also fixed the require: ref on
so_pillar_initial_import (was `pip:`, needs to be `cmd:` for the new
state type).
2026-05-04 22:08:31 -04:00

178 lines
7.0 KiB
Plaintext

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if 'postgres' in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

# Deploys the so_pillar schema (tables, views, audit triggers, secrets,
# RLS, pg_cron retention) inside the so-postgres container. Idempotent —
# every CREATE / GRANT is wrapped in IF NOT EXISTS / ON CONFLICT or DO
# blocks so re-running the state is a no-op when the schema is current.
#
# Gated on the postgres:so_pillar:enabled feature flag (default false).
# Flip to true once the postsalt branch is ready to bring ext_pillar live.

include:
  - postgres.enabled

{% set so_pillar_enabled = salt['pillar.get']('postgres:so_pillar:enabled', False) %}
{% if so_pillar_enabled %}

{% set drift_enabled = salt['pillar.get']('postgres:so_pillar:drift_check_enabled', False) %}
{% set schema_dir = '/opt/so/saltstack/default/salt/postgres/files/schema/pillar' %}
# Block until postgres accepts TCP connections — same idiom as
# telegraf_users.sls. docker_container.running returns before the database
# is actually ready on first init, so downstream psql calls need this gate.
# 60 attempts x 2s sleep = the 120s budget named in the error message.
so_pillar_postgres_wait_ready:
  cmd.run:
    - name: |
        for i in $(seq 1 60); do
          if docker exec so-postgres pg_isready -h 127.0.0.1 -U postgres -q 2>/dev/null; then
            exit 0
          fi
          sleep 2
        done
        echo "so-postgres did not accept TCP connections within 120s" >&2
        exit 1
    - require:
      - docker_container: so-postgres
{% set sql_files = [
  '001_schema.sql',
  '002_views.sql',
  '003_history_trigger.sql',
  '004_secrets.sql',
  '005_seed_roles.sql',
  '006_rls.sql',
] %}
{% if drift_enabled %}
{% do sql_files.append('007_drift_pgcron.sql') %}
{% endif %}
# 008 always applies — pg_notify-driven change fan-out is what the salt-master
# pg_notify_pillar engine consumes. Without it reactor wiring sees no events.
{% do sql_files.append('008_change_notify.sql') %}

# Apply the schema files strictly in list order: each state (after the first)
# requires its predecessor, so a failure halts the chain rather than letting
# later files run against a partially applied schema.
{% for sql_file in sql_files %}
so_pillar_apply_{{ sql_file | replace('.', '_') }}:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion \
          < {{ schema_dir }}/{{ sql_file }}
    - require:
      - cmd: so_pillar_postgres_wait_ready
{% if not loop.first %}
      - cmd: so_pillar_apply_{{ sql_files[loop.index0 - 1] | replace('.', '_') }}
{% endif %}
{% endfor %}
# Set the master encryption key GUC on the secret-owner role. The key itself
# is generated by setup/so-functions::secrets_pillar() (extended for postsalt)
# and lives in /opt/so/conf/postgres/so_pillar.key (mode 0400) — never read by
# Salt itself; the value flows into PG via ALTER ROLE so it sits only in the
# server's role catalog.
#
# Uses `cat` rather than the bash-only `$(< file)` so the script behaves the
# same when cmd.run's shell is dash/sh — `$(< file)` silently expands to an
# empty string there, which would set empty master keys without any error.
so_pillar_master_key_configure:
  cmd.run:
    - name: |
        if [ -r /opt/so/conf/postgres/so_pillar.key ]; then
          KEY="$(cat /opt/so/conf/postgres/so_pillar.key)"
          # NOTE(review): assumes the generated key never contains a single
          # quote (hex/base64 output) — confirm in secrets_pillar() before
          # changing the key format.
          docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion <<EOSQL
        ALTER ROLE so_pillar_secret_owner SET so_pillar.master_key = '$KEY';
        ALTER ROLE so_pillar_master SET so_pillar.master_key = '$KEY';
        ALTER ROLE so_pillar_writer SET so_pillar.master_key = '$KEY';
        EOSQL
        else
          echo "so_pillar.key not present yet; setup/so-functions must generate it before schema_pillar.sls" >&2
          exit 1
        fi
    - require:
      - cmd: so_pillar_apply_{{ sql_files[-1] | replace('.', '_') }}
# Set login passwords on the so_pillar_* roles. 006_rls.sql creates the roles
# as NOLOGIN with no password (plain SQL can't substitute pillar values), so
# the salt-master ext_pillar and the pg_notify_pillar engine — both of which
# connect as so_pillar_master via TCP — would fail with "password
# authentication failed" without this step. The password lives in pillar
# under secrets:pillar_master_pass (generated by setup/so-functions::secrets_pillar)
# and is the same one rendered into ext_pillar_postgres.conf.jinja and the
# engines.conf pg_notify_pillar block, so all three sides agree.
#
# output_loglevel: quiet keeps the rendered command — which contains the
# literal password — out of the minion/master logs and state output.
so_pillar_role_login_passwords:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion <<EOSQL
        ALTER ROLE so_pillar_master WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        ALTER ROLE so_pillar_writer WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        ALTER ROLE so_pillar_secret_owner WITH LOGIN PASSWORD '{{ pillar['secrets']['pillar_master_pass'] }}';
        EOSQL
    - output_loglevel: quiet
    - require:
      - cmd: so_pillar_master_key_configure
# Install psycopg2 into salt-master's bundled python so the pg_notify_pillar
# engine module can `import psycopg2`. Without this the engine's import fails
# silently in salt's loader and the engine just never starts. salt's bundled
# python at /opt/saltstack/salt/bin/python3 doesn't ship psycopg by default.
#
# pip's own exit code is deliberately ignored (`|| true`) because pip exits
# non-zero when patchelf isn't on PATH (it tries to rewrite the psycopg2
# wheel's RPATH after extraction) even though the wheel is fully installed
# and importable. The trailing `import psycopg2` is the real readiness gate:
# the state fails only when the module genuinely cannot be imported, instead
# of unconditionally succeeding as a bare `|| true` would.
so_pillar_psycopg2_in_salt_python:
  cmd.run:
    - name: |
        /opt/saltstack/salt/bin/pip3 install --quiet psycopg2-binary || true
        /opt/saltstack/salt/bin/python3 -c "import psycopg2"
    - unless: /opt/saltstack/salt/bin/python3 -c "import psycopg2"
    - require:
      - cmd: so_pillar_role_login_passwords
# One-shot importer run now that the schema exists. Safe to re-run: with no
# SLS edits on disk the importer produces zero row changes.
so_pillar_initial_import:
  cmd.run:
    - name: /usr/sbin/so-pillar-import --yes --reason 'schema_pillar.sls initial import'
    - require:
      - cmd: so_pillar_psycopg2_in_salt_python
# Switch so-yaml from dual-write to PG-canonical for managed paths, now that
# both the schema and the importer are in place. Bootstrap files (secrets.sls,
# postgres/auth.sls, ca/init.sls, *.nodes.sls, top.sls, ...) stay on disk
# either way because so_yaml_postgres.locate() raises SkipPath for them.
so_pillar_so_yaml_mode_dir:
  file.directory:
    - name: /opt/so/conf/so-yaml
    - user: socore
    - group: socore
    - mode: '0755'
    - makedirs: True

so_pillar_so_yaml_mode_postgres:
  file.managed:
    - name: /opt/so/conf/so-yaml/mode
    - contents: postgres
    - user: socore
    - group: socore
    - mode: '0644'
    - require:
      - file: so_pillar_so_yaml_mode_dir
      - cmd: so_pillar_initial_import
{% else %}

# Feature flag off: emit a visible no-op so highstate output shows the state
# was evaluated and intentionally skipped.
so_pillar_disabled_noop:
  test.nop: []

{% endif %}

{% else %}

# Standard Security Onion guard: this SLS was applied to a node role that
# allowed_states does not permit to run postgres states.
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}