# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if 'postgres' in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

# Deploys the so_pillar schema (tables, views, audit triggers, secrets,
# RLS, pg_cron retention) inside the so-postgres container. Idempotent —
# every CREATE / GRANT is wrapped in IF NOT EXISTS / ON CONFLICT or DO
# blocks so re-running the state is a no-op when the schema is current.
#
# Gated on the postgres:so_pillar:enabled feature flag (default false).
# Flip to true once the postsalt branch is ready to bring ext_pillar live.

include:
  - postgres.enabled

{% set so_pillar_enabled = salt['pillar.get']('postgres:so_pillar:enabled', False) %}

{% if so_pillar_enabled %}

{% set drift_enabled = salt['pillar.get']('postgres:so_pillar:drift_check_enabled', False) %}
{% set schema_dir = '/opt/so/saltstack/default/salt/postgres/files/schema/pillar' %}

# Wait for postgres to actually accept TCP connections. Same idiom as
# telegraf_users.sls. The docker_container.running state returns earlier than
# the database is ready on first init.
so_pillar_postgres_wait_ready:
  cmd.run:
    - name: |
        for i in $(seq 1 60); do
          if docker exec so-postgres pg_isready -h 127.0.0.1 -U postgres -q 2>/dev/null; then
            exit 0
          fi
          sleep 2
        done
        echo "so-postgres did not accept TCP connections within 120s" >&2
        exit 1
    - require:
      - docker_container: so-postgres

{% set sql_files = [
    '001_schema.sql',
    '002_views.sql',
    '003_history_trigger.sql',
    '004_secrets.sql',
    '005_seed_roles.sql',
    '006_rls.sql',
] %}
{% if drift_enabled %}
{% do sql_files.append('007_drift_pgcron.sql') %}
{% endif %}

# 008 always applies — pg_notify-driven change fan-out is what the salt-master
# pg_notify_pillar engine consumes. Without it reactor wiring sees no events.
{% do sql_files.append('008_change_notify.sql') %}

# Apply each schema file in order; every file requires its predecessor so a
# failure stops the chain instead of applying later files out of order.
{% for sql_file in sql_files %}
so_pillar_apply_{{ sql_file | replace('.', '_') }}:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion \
          < {{ schema_dir }}/{{ sql_file }}
    - require:
      - cmd: so_pillar_postgres_wait_ready
{% if not loop.first %}
      - cmd: so_pillar_apply_{{ sql_files[loop.index0 - 1] | replace('.', '_') }}
{% endif %}
{% endfor %}

# Set the master encryption key GUC on the secret-owner role. The key itself
# is generated by setup/so-functions::secrets_pillar() (extended for postsalt)
# and lives in /opt/so/conf/postgres/so_pillar.key (mode 0400) — never read by
# Salt itself; the value flows into PG via ALTER ROLE so it sits only in the
# server's role catalog.
so_pillar_master_key_configure:
  cmd.run:
    - name: |
        if [ -r /opt/so/conf/postgres/so_pillar.key ]; then
          KEY="$(< /opt/so/conf/postgres/so_pillar.key)"
          # NOTE(review): the psql heredoc body (the ALTER ROLE ... SET
          # statement applying $KEY) and the else-branch error message were
          # lost when this file was mangled — everything between the heredoc
          # '<<' and the '>&2' redirect was stripped. The residue below is
          # preserved verbatim; restore the original from version control
          # before enabling this state.
          docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion <&2
          exit 1
        fi
    - require:
      - cmd: so_pillar_apply_{{ sql_files[-1] | replace('.', '_') }}

# Set login passwords on the so_pillar_* roles.
# 006_rls.sql creates the roles
# as NOLOGIN with no password (plain SQL can't substitute pillar values), so
# the salt-master ext_pillar and the pg_notify_pillar engine — both of which
# connect as so_pillar_master via TCP — would fail with "password
# authentication failed" without this step. The password lives in pillar
# under secrets:pillar_master_pass (generated by setup/so-functions::secrets_pillar)
# and is the same one rendered into ext_pillar_postgres.conf.jinja and the
# engines.conf pg_notify_pillar block, so all three sides agree.
so_pillar_role_login_passwords:
  cmd.run:
    - name: |
        # NOTE(review): the psql heredoc (the ALTER ROLE ... LOGIN PASSWORD
        # statements) that followed this invocation was stripped when the
        # file was mangled, along with this state's require list. Restore
        # both from version control before enabling this state.
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d securityonion
    - require:
      # NOTE(review): reconstructed — original require was lost; the roles
      # are created by the final schema file, so this ordering is the
      # plausible minimum. Confirm against version control.
      - cmd: so_pillar_apply_{{ sql_files[-1] | replace('.', '_') }}

# NOTE(review): this state's header and the start of its command were lost in
# the mangling. The state id is grounded by so_pillar_initial_import's
# require below; the command is reconstructed from the surviving log
# redirect, '|| true' tail, and the 'unless' import probe. Confirm the exact
# pip invocation against version control.
so_pillar_psycopg2_in_salt_python:
  cmd.run:
    - name: |
        /opt/saltstack/salt/bin/python3 -m pip install psycopg2-binary \
          >/opt/so/log/so_pillar/psycopg2_install.log 2>&1 \
          || true
    - unless: /opt/saltstack/salt/bin/python3 -c "import psycopg2"
    - require:
      - cmd: so_pillar_role_login_passwords

# Run the importer once after the schema is in place. Idempotent — re-runs
# with no SLS edits produce zero row changes.
so_pillar_initial_import:
  cmd.run:
    - name: /usr/sbin/so-pillar-import --yes --reason 'schema_pillar.sls initial import'
    - require:
      - cmd: so_pillar_psycopg2_in_salt_python

# Flip so-yaml from dual-write to PG-canonical for managed paths now that
# the schema and importer are both in place. Bootstrap files (secrets.sls,
# postgres/auth.sls, ca/init.sls, *.nodes.sls, top.sls, ...) remain on disk
# regardless because so_yaml_postgres.locate() raises SkipPath for them.
so_pillar_so_yaml_mode_dir:
  file.directory:
    - name: /opt/so/conf/so-yaml
    - user: socore
    - group: socore
    - mode: '0755'
    - makedirs: True

so_pillar_so_yaml_mode_postgres:
  file.managed:
    - name: /opt/so/conf/so-yaml/mode
    - contents: postgres
    - user: socore
    - group: socore
    - mode: '0644'
    - require:
      - file: so_pillar_so_yaml_mode_dir
      - cmd: so_pillar_initial_import

{% else %}

so_pillar_disabled_noop: test.nop

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}