Compare commits

...

30 Commits

Author SHA1 Message Date
Josh Patterson 1281f0ee37 Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-05-06 09:46:12 -04:00
Josh Patterson 4bc19f91ce Merge pull request #15867 from Security-Onion-Solutions/fixhype
sanitize minion ids for hypervisor reactors / orchestration
2026-05-06 09:46:01 -04:00
Josh Patterson f774334b6c Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-05-06 08:16:41 -04:00
Mike Reeves 4990d0ddea Merge pull request #15866 from Security-Onion-Solutions/management-bond1
Management bond1
2026-05-05 17:17:58 -04:00
Mike Reeves 3e49322220 Allow preconfigured management bond in requirements 2026-05-05 15:35:12 -04:00
Mike Reeves ecb92d43fc Limit management bond setup to ISO installs 2026-05-05 15:30:09 -04:00
Mike Reeves 3b714db0bf Show management bond option consistently 2026-05-05 15:22:40 -04:00
Mike Reeves f17da4e68b Add management bond setup option 2026-05-05 15:13:24 -04:00
Jorge Reyes 04cfc22e3f Merge pull request #15864 from Security-Onion-Solutions/reyesj2/patch-2
update grok type conversion to convert processor
2026-05-05 13:58:39 -05:00
reyesj2 dceed421ae update grok type conversion to convert processor 2026-05-05 13:41:00 -05:00
Josh Patterson 652ac5d61f fix regex 2026-05-05 14:26:04 -04:00
Josh Patterson f888a2ba6b Merge remote-tracking branch 'origin/3/dev' into fixhype 2026-05-05 10:28:49 -04:00
Mike Reeves 8a1ee02335 Merge pull request #15846 from Security-Onion-Solutions/feature/ensure-pyyaml
Ensure python3-pyyaml is installed before continuing setup
2026-05-05 10:24:25 -04:00
Josh Patterson 192f6cfe13 Merge remote-tracking branch 'origin/3/dev' into fixhype 2026-05-05 08:18:26 -04:00
Mike Reeves 5bca81d833 Merge pull request #15858 from Security-Onion-Solutions/security-fix
Fix unsafe PyYAML load in filecheck
2026-05-04 16:16:40 -04:00
Josh Patterson 1c6574c694 ensure minion ids 2026-05-04 14:03:14 -04:00
Jorge Reyes bc64f1431d Merge pull request #15857 from Security-Onion-Solutions/reyesj2/package-registry-health
fleet package registry health check
2026-05-04 11:05:23 -05:00
reyesj2 2203037ce7 fleet package registry health check 2026-05-04 10:52:37 -05:00
Josh Patterson 7fcace34c4 add sensoroni to push map 2026-04-30 16:09:08 -04:00
Josh Patterson 9541024eb7 fix broken things 2026-04-30 15:35:24 -04:00
Mike Reeves 3a4b7b50de ensure python3-pyyaml is installed before continuing setup 2026-04-30 10:15:09 -04:00
Josh Patterson 0d166ef732 remove trailing slashes 2026-04-30 09:53:00 -04:00
Josh Patterson f7d2994f8b filter temp files 2026-04-30 09:16:22 -04:00
Josh Patterson 8f0757606d include salt..minion 2026-04-29 16:42:19 -04:00
Josh Patterson 0a8f2e01a0 install pyinotify 2026-04-29 16:41:56 -04:00
Josh Patterson 4546d7bc52 Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-04-29 14:28:19 -04:00
Josh Patterson 17849d8758 Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-04-28 15:49:22 -04:00
Josh Patterson d3d30a587c Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-04-28 15:30:31 -04:00
Josh Patterson 034711d148 Merge remote-tracking branch 'origin/3/dev' into saltthangs 2026-04-28 10:47:29 -04:00
Mike Reeves a0cf0489d6 reduce highstate frequency with active push for rules and pillars
- schedule highstate every 2 hours (was 15 minutes); interval lives in
  global:push:highstate_interval_hours so the SOC admin UI can tune it and
  so-salt-minion-check derives its threshold as (interval + 1) * 3600
- add inotify beacon on the manager + master reactor + orch.push_batch that
  writes per-app intent files, with a so-push-drainer schedule on the manager
  that debounces, dedupes, and dispatches a single orchestration
- pillar_push_map.yaml allowlists the apps whose pillar changes trigger an
  immediate targeted state.apply (targets verified against salt/top.sls);
  edits under pillar/minions/ trigger a state.highstate on that one minion
- host-batch every push orchestration (batch: 25%, batch_wait: 15) so rule
  changes don't thundering-herd large fleets
- new global:push:enabled kill-switch tears down the beacon, reactor config,
  and drainer schedule on the next highstate for operators who want to keep
  highstate-only behavior
- set restart_policy: unless-stopped on 23 container states so docker
  recovers crashes without waiting for the next highstate; leave registry
  (always), strelka/backend (on-failure), kratos, and hydra alone with
  inline comments explaining why
2026-04-10 15:43:16 -04:00
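
The threshold derivation described in the commit message above is simple arithmetic; a minimal sketch (the 2-hour default and the (interval + 1) * 3600 formula come straight from the commit message, the function name is illustrative):

# Illustrative only: mirrors the formula in the commit message, where
# so-salt-minion-check derives its restart threshold from the
# global:push:highstate_interval_hours pillar value.
def minion_check_threshold(interval_hours: int = 2) -> int:
    # One extra hour of grace beyond the scheduled interval, in seconds.
    return (interval_hours + 1) * 3600

assert minion_check_threshold(2) == 10800  # default: restart if no state apply in 3h
assert minion_check_threshold(4) == 18000  # tuned via the SOC admin UI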
55 changed files with 1254 additions and 48 deletions
@@ -1,5 +1,3 @@
-{% import_yaml 'salt/minion.defaults.yaml' as SALT_MINION_DEFAULTS -%}
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
@@ -25,7 +23,8 @@ SYSTEM_START_TIME=$(date -d "$(</proc/uptime awk '{print $1}') seconds ago" +%s)
LAST_HIGHSTATE_END=$([ -e "/opt/so/log/salt/lasthighstate" ] && date -r /opt/so/log/salt/lasthighstate +%s || echo 0)
LAST_HEALTHCHECK_STATE_APPLY=$([ -e "/opt/so/log/salt/state-apply-test" ] && date -r /opt/so/log/salt/state-apply-test +%s || echo 0)
# SETTING THRESHOLD TO ANYTHING UNDER 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
-THRESHOLD={{SALT_MINION_DEFAULTS.salt.minion.check_threshold}} #within how many seconds the file /opt/so/log/salt/state-apply-test must have been touched/modified before the salt minion is restarted
+# THRESHOLD is derived from the global push highstate interval + 1 hour, so the minion-check grace period tracks the schedule automatically.
+THRESHOLD=$(( ({{ salt['pillar.get']('global:push:highstate_interval_hours', 2) }} + 1) * 3600 )) #within how many seconds the file /opt/so/log/salt/state-apply-test must have been touched/modified before the salt minion is restarted
THRESHOLD_DATE=$((LAST_HEALTHCHECK_STATE_APPLY+THRESHOLD))
logCmd() {
+1
@@ -19,6 +19,7 @@ wait_for_elasticsearch:
so-elastalert:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastalert:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: elastalert
    - name: so-elastalert
    - user: so-elastalert
@@ -15,6 +15,7 @@ include:
so-elastic-fleet-package-registry:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-fleet-package-registry:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - name: so-elastic-fleet-package-registry
    - hostname: Fleet-package-reg-{{ GLOBALS.hostname }}
    - detach: True
@@ -51,6 +52,16 @@ so-elastic-fleet-package-registry:
      - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
    {% endfor %}
    {% endif %}
+wait_for_so-elastic-fleet-package-registry:
+  http.wait_for_successful_query:
+    - name: "http://localhost:8080/health"
+    - status: 200
+    - wait_for: 300
+    - request_interval: 15
+    - require:
+      - docker_container: so-elastic-fleet-package-registry
delete_so-elastic-fleet-package-registry_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
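
The new wait_for state above blocks until the package registry answers its health endpoint. A rough Python equivalent of that poll loop, assuming the same URL, 300-second budget, and 15-second interval (the `requests` usage is an illustrative stand-in for Salt's http.wait_for_successful_query, not part of the change):

import time
import requests  # illustrative stand-in for Salt's http.wait_for_successful_query

def wait_for_health(url="http://localhost:8080/health", budget=300, interval=15):
    deadline = time.monotonic() + budget
    while time.monotonic() < deadline:
        try:
            if requests.get(url, timeout=5).status_code == 200:
                return True
        except requests.RequestException:
            pass  # registry not up yet; retry after the interval
        time.sleep(interval)
    return False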
+1
@@ -16,6 +16,7 @@ include:
so-elastic-agent:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - name: so-elastic-agent
    - hostname: {{ GLOBALS.hostname }}
    - detach: True
+1
@@ -40,6 +40,7 @@ elasticagent_syncartifacts:
so-elastic-fleet:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - name: so-elastic-fleet
    - hostname: FleetServer-{{ GLOBALS.hostname }}
    - detach: True
+1
@@ -24,6 +24,7 @@ include:
so-elasticsearch:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ ELASTICSEARCHMERGED.version }}
+   - restart_policy: unless-stopped
    - hostname: elasticsearch
    - name: so-elasticsearch
    - user: elasticsearch
+1 -1
@@ -63,7 +63,7 @@
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
-{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
+{ "convert": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "type":"long", "ignore_missing": true } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } },
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
+8 -1
@@ -1,3 +1,10 @@
global:
  pcapengine: SURICATA
  pipeline: REDIS
+  push:
+    enabled: true
+    highstate_interval_hours: 2
+    debounce_seconds: 30
+    drain_interval: 15
+    batch: '25%'
+    batch_wait: 15
+37
@@ -59,4 +59,41 @@ global:
      description: Allows use of Endgame with Security Onion. This feature requires a license from Endgame.
      global: True
      advanced: True
+  push:
+    enabled:
+      description: Master kill-switch for the active push feature. When disabled, rule and pillar changes are picked up at the next scheduled highstate instead of being pushed immediately.
+      forcedType: bool
+      helpLink: push
+      global: True
+    highstate_interval_hours:
+      description: How often every minion in the grid runs a scheduled state.highstate, in hours. Lower values keep minions closer in sync at the cost of more load; higher values reduce load but increase worst-case latency for non-pushed changes. The salt-minion health check restarts a minion if its last highstate is older than this value plus one hour.
+      forcedType: int
+      helpLink: push
+      global: True
+      advanced: True
+    debounce_seconds:
+      description: Trailing-edge debounce window in seconds. A push intent must be quiet for this long before the drainer dispatches. Rapid bursts of edits within this window coalesce into one dispatch.
+      forcedType: int
+      helpLink: push
+      global: True
+      advanced: True
+    drain_interval:
+      description: How often the push drainer checks for ready intents, in seconds. Small values lower dispatch latency at the cost of more background work on the manager.
+      forcedType: int
+      helpLink: push
+      global: True
+      advanced: True
+    batch:
+      description: "Host batch size for push orchestrations. A number (e.g. '10') or a percentage (e.g. '25%'). Limits how many minions run the push state at once so large fleets don't thundering-herd."
+      helpLink: push
+      global: True
+      advanced: True
+      regex: '^([0-9]+%?)$'
+      regexFailureMessage: Enter a whole number or a whole-number percentage (e.g. 10 or 25%).
+    batch_wait:
+      description: Seconds to wait between host batches in a push orchestration. Gives the fleet time to breathe between waves.
+      forcedType: int
+      helpLink: push
+      global: True
+      advanced: True
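
A worked example of the trailing-edge debounce the debounce_seconds annotation describes, with hypothetical timestamps (the real check lives in so-push-drainer, shown later in this diff):

# Hypothetical timeline with debounce_seconds = 30: every edit refreshes
# last_touch, so a burst only dispatches once it has been quiet for 30s.
debounce_seconds = 30
edits = [0, 10, 20]       # three rapid edits to the same pillar dir
last_touch = max(edits)   # the intent file's last_touch is refreshed each time

for now in (25, 45, 55):  # drainer passes every drain_interval seconds
    ready = now - last_touch >= debounce_seconds
    print(now, "dispatch" if ready else "still in debounce window")
# 25 still in debounce window
# 45 still in debounce window   (20 + 30 = 50 > 45)
# 55 dispatch                   -> one orchestration for the whole burst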
+1
@@ -58,6 +58,7 @@ so-hydra:
      - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
    {% endfor %}
    {% endif %}
+   # Intentionally unless-stopped -- matches the fleet default.
    - restart_policy: unless-stopped
    - watch:
      - file: hydraconfig
+1
@@ -15,6 +15,7 @@ include:
so-idh:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idh:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - name: so-idh
    - detach: True
    - network_mode: host
+1
@@ -18,6 +18,7 @@ include:
so-influxdb:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-influxdb:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: influxdb
    - networks:
      - sobridge:
+1
@@ -27,6 +27,7 @@ include:
so-kafka:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: so-kafka
    - name: so-kafka
    - networks:
+1
@@ -16,6 +16,7 @@ include:
so-kibana:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kibana:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: kibana
    - user: kibana
    - networks:
+1
@@ -51,6 +51,7 @@ so-kratos:
      - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
    {% endfor %}
    {% endif %}
+   # Intentionally unless-stopped -- matches the fleet default.
    - restart_policy: unless-stopped
    - watch:
      - file: kratosschema
+1
@@ -28,6 +28,7 @@ include:
so-logstash:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: so-logstash
    - name: so-logstash
    - networks:
+21
@@ -0,0 +1,21 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'global/map.jinja' import GLOBALMERGED %}
include:
- salt.minion
{% if GLOBALS.is_manager and GLOBALMERGED.push.enabled %}
salt_beacons_pushstate:
file.managed:
- name: /etc/salt/minion.d/beacons_pushstate.conf
- source: salt://manager/files/beacons_pushstate.conf.jinja
- template: jinja
- watch_in:
- service: salt_minion_service
{% else %}
salt_beacons_pushstate:
file.absent:
- name: /etc/salt/minion.d/beacons_pushstate.conf
- watch_in:
- service: salt_minion_service
{% endif %}
@@ -0,0 +1,53 @@
beacons:
inotify:
- disable_during_state_run: True
- coalesce: True
- files:
/opt/so/saltstack/local/salt/suricata/rules:
mask:
- close_write
- moved_to
- delete
recurse: True
auto_add: True
exclude:
- '\.sw[a-z]$':
regex: True
- '~$':
regex: True
- '/4913$':
regex: True
- '/\.#':
regex: True
/opt/so/saltstack/local/salt/strelka/rules/compiled:
mask:
- close_write
- moved_to
- delete
recurse: True
auto_add: True
exclude:
- '\.sw[a-z]$':
regex: True
- '~$':
regex: True
- '/4913$':
regex: True
- '/\.#':
regex: True
/opt/so/saltstack/local/pillar:
mask:
- close_write
- moved_to
- delete
recurse: True
auto_add: True
exclude:
- '\.sw[a-z]$':
regex: True
- '~$':
regex: True
- '/4913$':
regex: True
- '/\.#':
regex: True
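
The exclude patterns above are aimed at editor temp files (this is the "filter temp files" commit in the list). A quick check of what they match; the filenames are examples, the regexes are copied from the beacon config, and re.search is used here for illustration (the beacon's exact matching semantics are Salt's):

import re

# Copied from the beacon config above.
excludes = [r'\.sw[a-z]$', r'~$', r'/4913$', r'/\.#']

paths = [
    '/opt/so/saltstack/local/salt/suricata/rules/local.rules',       # real edit
    '/opt/so/saltstack/local/salt/suricata/rules/.local.rules.swp',  # vim swap
    '/opt/so/saltstack/local/salt/suricata/rules/local.rules~',      # backup file
    '/opt/so/saltstack/local/salt/suricata/rules/4913',              # vim probe file
]
for p in paths:
    skipped = any(re.search(rx, p) for rx in excludes)
    print('skip' if skipped else 'push', p)
# Only the first path would trigger a push intent.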
+2
@@ -15,6 +15,7 @@ include:
  - manager.elasticsearch
  - manager.kibana
  - manager.managed_soc_annotations
+ - manager.beacons
repo_log_dir:
  file.directory:
@@ -231,6 +232,7 @@ surifiltersrules:
    - user: 939
    - group: 939
{% else %}
{{sls}}_state_not_allowed:
+232
@@ -0,0 +1,232 @@
#!/opt/saltstack/salt/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
so-push-drainer
===============
Scheduled drainer for the active-push feature. Runs on the manager every
drain_interval seconds (default 15) via a salt schedule in salt/schedule.sls.
For each intent file under /opt/so/state/push_pending/*.json whose last_touch
is older than debounce_seconds, this script:
* concatenates the actions lists from every ready intent
* dedupes by (state or __highstate__, tgt, tgt_type)
* dispatches a single `salt-run state.orchestrate orch.push_batch --async`
with the deduped actions list passed as pillar kwargs
* deletes the contributed intent files on successful dispatch
Reactor sls files (push_suricata, push_strelka, push_pillar) write intents
but never dispatch directly -- see plan
/home/mreeves/.claude/plans/goofy-marinating-hummingbird.md for the full design.
"""
import fcntl
import glob
import json
import logging
import logging.handlers
import os
import subprocess
import sys
import time
import salt.client
PENDING_DIR = '/opt/so/state/push_pending'
LOCK_FILE = os.path.join(PENDING_DIR, '.lock')
LOG_FILE = '/opt/so/log/salt/so-push-drainer.log'
HIGHSTATE_SENTINEL = '__highstate__'
def _make_logger():
logger = logging.getLogger('so-push-drainer')
logger.setLevel(logging.INFO)
if not logger.handlers:
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
handler = logging.handlers.RotatingFileHandler(
LOG_FILE, maxBytes=5 * 1024 * 1024, backupCount=3,
)
handler.setFormatter(logging.Formatter(
'%(asctime)s | %(levelname)s | %(message)s',
))
logger.addHandler(handler)
return logger
def _load_push_cfg():
"""Read the global:push pillar subtree via salt-call. Returns a dict."""
caller = salt.client.Caller()
cfg = caller.cmd('pillar.get', 'global:push', {})
return cfg if isinstance(cfg, dict) else {}
def _read_intent(path, log):
try:
with open(path, 'r') as f:
return json.load(f)
except (IOError, ValueError) as exc:
log.warning('cannot read intent %s: %s', path, exc)
return None
except Exception:
log.exception('unexpected error reading %s', path)
return None
def _dedupe_actions(actions):
seen = set()
deduped = []
for action in actions:
if not isinstance(action, dict):
continue
state_key = HIGHSTATE_SENTINEL if action.get('highstate') else action.get('state')
tgt = action.get('tgt')
tgt_type = action.get('tgt_type', 'compound')
if not state_key or not tgt:
continue
key = (state_key, tgt, tgt_type)
if key in seen:
continue
seen.add(key)
deduped.append(action)
return deduped
def _dispatch(actions, log):
pillar_arg = json.dumps({'actions': actions})
cmd = [
'salt-run',
'state.orchestrate',
'orch.push_batch',
'pillar={}'.format(pillar_arg),
'--async',
]
log.info('dispatching: %s', ' '.join(cmd[:3]) + ' pillar=<{} actions>'.format(len(actions)))
try:
result = subprocess.run(
cmd, check=True, capture_output=True, text=True, timeout=60,
)
except subprocess.CalledProcessError as exc:
log.error('dispatch failed (rc=%s): stdout=%s stderr=%s',
exc.returncode, exc.stdout, exc.stderr)
return False
except subprocess.TimeoutExpired:
log.error('dispatch timed out after 60s')
return False
except Exception:
log.exception('dispatch raised')
return False
log.info('dispatch accepted: %s', (result.stdout or '').strip())
return True
def main():
log = _make_logger()
if not os.path.isdir(PENDING_DIR):
# Nothing to do; reactors create the dir on first use.
return 0
try:
push = _load_push_cfg()
except Exception:
log.exception('failed to read global:push pillar; aborting drain pass')
return 1
if not push.get('enabled', True):
log.debug('push disabled; exiting')
return 0
debounce_seconds = int(push.get('debounce_seconds', 30))
os.makedirs(PENDING_DIR, exist_ok=True)
lock_fd = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR, 0o644)
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX)
intent_files = [
p for p in sorted(glob.glob(os.path.join(PENDING_DIR, '*.json')))
if os.path.basename(p) != '.lock'
]
if not intent_files:
return 0
now = time.time()
ready = []
skipped = 0
broken = []
for path in intent_files:
intent = _read_intent(path, log)
if not isinstance(intent, dict):
broken.append(path)
continue
last_touch = intent.get('last_touch', 0)
if now - last_touch < debounce_seconds:
skipped += 1
continue
ready.append((path, intent))
for path in broken:
try:
os.unlink(path)
except OSError:
pass
if not ready:
if skipped:
log.debug('no ready intents (%d still in debounce window)', skipped)
return 0
combined_actions = []
oldest_first_touch = now
all_paths = []
for path, intent in ready:
combined_actions.extend(intent.get('actions', []) or [])
first = intent.get('first_touch', now)
if first < oldest_first_touch:
oldest_first_touch = first
all_paths.extend(intent.get('paths', []) or [])
deduped = _dedupe_actions(combined_actions)
if not deduped:
log.warning('%d intent(s) had no usable actions; clearing', len(ready))
for path, _ in ready:
try:
os.unlink(path)
except OSError:
pass
return 0
debounce_duration = now - oldest_first_touch
log.info(
'draining %d intent(s): %d action(s) after dedupe (raw=%d), '
'debounce_duration=%.1fs, paths=%s',
len(ready), len(deduped), len(combined_actions),
debounce_duration, all_paths[:20],
)
if not _dispatch(deduped, log):
log.warning('dispatch failed; leaving intent files in place for retry')
return 1
for path, _ in ready:
try:
os.unlink(path)
except OSError:
log.exception('failed to remove drained intent %s', path)
return 0
finally:
try:
fcntl.flock(lock_fd, fcntl.LOCK_UN)
finally:
os.close(lock_fd)
if __name__ == '__main__':
sys.exit(main())
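
For reference, a sketch of what an intent file under /opt/so/state/push_pending/ looks like by the time the drainer reads it; the field names come from _write_intent in the reactors below, the values are made up:

# Hypothetical contents of /opt/so/state/push_pending/rules_suricata.json:
example_intent = {
    "first_touch": 1767812400.0,  # when the burst of edits started
    "last_touch": 1767812455.2,   # refreshed on every edit; gates the debounce
    "actions": [
        {"state": "suricata",
         "tgt": "G@role:so-eval or G@role:so-heavynode or G@role:so-sensor "
                "or G@role:so-standalone or G@role:so-import"},
    ],
    "paths": ["/opt/so/saltstack/local/salt/suricata/rules/local.rules"],
}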
+1
@@ -34,6 +34,7 @@ make-rule-dir-nginx:
so-nginx:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: so-nginx
    - networks:
      - sobridge:
+10 -1
@@ -3,7 +3,14 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
-{% set hypervisor = pillar.minion_id %}
+{% set hypervisor = pillar.get('minion_id', '') %}
+{% if not hypervisor|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
+{% do salt.log.error('delete_hypervisor_orch: refusing unsafe minion_id=' ~ hypervisor) %}
+delete_hypervisor_invalid_minion_id:
+  test.fail_without_changes:
+    - name: delete_hypervisor_invalid_minion_id
+{% else %}
ensure_hypervisor_mine_deleted:
  salt.function:
@@ -20,3 +27,5 @@ update_salt_cloud_profile:
    - sls:
      - salt.cloud.config
    - concurrent: True
+{% endif %}
+37
@@ -0,0 +1,37 @@
{% from 'global/map.jinja' import GLOBALMERGED %}
{% set actions = salt['pillar.get']('actions', []) %}
{% set BATCH = GLOBALMERGED.push.batch %}
{% set BATCH_WAIT = GLOBALMERGED.push.batch_wait %}
{% for action in actions %}
{% if action.get('highstate') %}
apply_highstate_{{ loop.index }}:
salt.state:
- tgt: '{{ action.tgt }}'
- tgt_type: {{ action.get('tgt_type', 'compound') }}
- highstate: True
- batch: {{ action.get('batch', BATCH) }}
- batch_wait: {{ action.get('batch_wait', BATCH_WAIT) }}
- kwarg:
queue: 2
{% else %}
refresh_pillar_{{ loop.index }}:
salt.function:
- name: saltutil.refresh_pillar
- tgt: '{{ action.tgt }}'
- tgt_type: {{ action.get('tgt_type', 'compound') }}
apply_{{ action.state | replace('.', '_') }}_{{ loop.index }}:
salt.state:
- tgt: '{{ action.tgt }}'
- tgt_type: {{ action.get('tgt_type', 'compound') }}
- sls:
- {{ action.state }}
- batch: {{ action.get('batch', BATCH) }}
- batch_wait: {{ action.get('batch_wait', BATCH_WAIT) }}
- kwarg:
queue: 2
- require:
- salt: refresh_pillar_{{ loop.index }}
{% endif %}
{% endfor %}
+10 -1
@@ -12,7 +12,14 @@
{% if 'vrt' in salt['pillar.get']('features', []) %}
{% do salt.log.debug('vm_pillar_clean_orch: Running') %}
-{% set vm_name = pillar.get('vm_name') %}
+{% set vm_name = pillar.get('vm_name', '') %}
+{% if not vm_name|regex_match('^([A-Za-z0-9._-]{1,253})$') %}
+{% do salt.log.error('vm_pillar_clean_orch: refusing unsafe vm_name=' ~ vm_name) %}
+vm_pillar_clean_invalid_name:
+  test.fail_without_changes:
+    - name: vm_pillar_clean_invalid_name
+{% else %}
delete_adv_{{ vm_name }}_pillar:
  module.run:
@@ -24,6 +31,8 @@ delete_{{ vm_name }}_pillar:
  - file.remove:
    - path: /opt/so/saltstack/local/pillar/minions/{{ vm_name }}.sls
+{% endif %}
{% else %}
{% do salt.log.error(
+6 -4
@@ -3,12 +3,15 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
-{% if data['id'].endswith('_hypervisor') and data['result'] == True %}
+{% set hid = data['id'] %}
+{% if hid|regex_match('^([A-Za-z0-9._-]{1,253})$')
+   and hid.endswith('_hypervisor')
+   and data['result'] == True %}
{% if data['act'] == 'accept' %}
check_and_trigger:
  runner.setup_hypervisor.setup_environment:
-    - minion_id: {{ data['id'] }}
+    - minion_id: {{ hid }}
{% endif %}
{% if data['act'] == 'delete' %}
@@ -17,8 +20,7 @@ delete_hypervisor:
    - args:
      - mods: orch.delete_hypervisor
    - pillar:
-       minion_id: {{ data['id'] }}
+       minion_id: {{ hid }}
{% endif %}
{% endif %}
+27 -15
@@ -1,7 +1,7 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
@@ -9,30 +9,42 @@ import logging
import os
import pwd
import grp
+import re
+log = logging.getLogger(__name__)
+PILLAR_ROOT = '/opt/so/saltstack/local/pillar/minions/'
+_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
def run():
-    vm_name = data['kwargs']['name']
-    logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
-    pillar_root = '/opt/so/saltstack/local/pillar/minions/'
+    vm_name = data.get('kwargs', {}).get('name', '')
+    if not _VMNAME_RE.match(str(vm_name)):
+        log.error("createEmptyPillar reactor: refusing unsafe vm_name=%r", vm_name)
+        return {}
+    log.info("createEmptyPillar reactor: vm_name: %s", vm_name)
    pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']
    try:
-        # Get socore user and group IDs
        socore_uid = pwd.getpwnam('socore').pw_uid
        socore_gid = grp.getgrnam('socore').gr_gid
+        pillar_root_real = os.path.realpath(PILLAR_ROOT)
        for f in pillar_files:
-            full_path = pillar_root + f
-            if not os.path.exists(full_path):
-                # Create empty file
-                os.mknod(full_path)
-                # Set ownership to socore:socore
-                os.chown(full_path, socore_uid, socore_gid)
-                # Set mode to 644 (rw-r--r--)
-                os.chmod(full_path, 0o640)
-                logging.error("createEmptyPillar reactor: created %s with socore:socore ownership and mode 644" % f)
+            full_path = os.path.join(PILLAR_ROOT, f)
+            resolved = os.path.realpath(full_path)
+            if os.path.dirname(resolved) != pillar_root_real:
+                log.error("createEmptyPillar reactor: refusing path outside pillar root: %s", resolved)
+                continue
+            if os.path.exists(resolved):
+                continue
+            os.mknod(resolved)
+            os.chown(resolved, socore_uid, socore_gid)
+            os.chmod(resolved, 0o640)
+            log.info("createEmptyPillar reactor: created %s with socore:socore ownership and mode 0640", f)
    except (KeyError, OSError) as e:
-        logging.error("createEmptyPillar reactor: Error setting ownership/permissions: %s" % str(e))
+        log.error("createEmptyPillar reactor: Error setting ownership/permissions: %s", e)
    return {}
+33 -11
@@ -1,18 +1,40 @@
+#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
-remove_key:
-  wheel.key.delete:
-    - args:
-      - match: {{ data['name'] }}
-{{ data['name'] }}_pillar_clean:
-  runner.state.orchestrate:
-    - args:
-      - mods: orch.vm_pillar_clean
-      - pillar:
-          vm_name: {{ data['name'] }}
-{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}
+import logging
+import re
+log = logging.getLogger(__name__)
+_VMNAME_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
+def run():
+    name = data.get('name', '')
+    if not _VMNAME_RE.match(str(name)):
+        log.error("deleteKey reactor: refusing unsafe name=%r", name)
+        return {}
+    log.info("deleteKey reactor: deleted minion key: %s", name)
+    return {
+        'remove_key': {
+            'wheel.key.delete': [
+                {'args': [
+                    {'match': name},
+                ]},
+            ],
+        },
+        '%s_pillar_clean' % name: {
+            'runner.state.orchestrate': [
+                {'args': [
+                    {'mods': 'orch.vm_pillar_clean'},
+                    {'pillar': {'vm_name': name}},
+                ]},
+            ],
+        },
+    }
+133
@@ -0,0 +1,133 @@
# One pillar directory can map to multiple (state, tgt) actions.
# tgt is a raw salt compound expression. tgt_type is always "compound".
# Per-action `batch` / `batch_wait` override the orch defaults (25% / 15s).
#
# Notes:
# - `bpf` is a pillar-only dir (no state of its own) consumed by both
# zeek and suricata via macros, so a bpf pillar change re-applies both.
# - suricata/strelka/zeek/elasticsearch/redis/kafka/logstash etc. have
# their own pillar dirs AND their own state, so they map 1:1 (or 1:2
# in strelka's case, because of the split init.sls / manager.sls).
# - `data` and `node_data` pillar dirs are intentionally omitted --
# they're pillar-only data consumed by many states; trying to handle
# them generically would amount to a highstate.
#
# The role sets here were verified line-by-line against salt/top.sls. If
# salt/top.sls changes how an app is targeted, update the corresponding
# compound here.
# firewall: the one pillar everyone touches. Applied everywhere intentionally
# because every host's iptables needs to know about every other host in the
# grid. Salt's firewall state is idempotent (file.managed + iptables-restore
# onchanges in salt/firewall/init.sls), so hosts whose rendered firewall is
# unchanged do a file comparison and no-op without touching iptables -- actual
# reload happens only on the hosts whose rules actually changed. Fleetwide
# blast radius is intentional and matches the pre-plan behavior via highstate.
# Adding N sensors in a burst coalesces into one dispatch via the drainer.
firewall:
- state: firewall
tgt: '*'
# bpf is pillar-only (no state); consumed by both zeek and suricata as macros.
# Both states run on sensor_roles + so-import per salt/top.sls.
bpf:
- state: zeek
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-import or G@role:so-sensor or G@role:so-standalone'
- state: suricata
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-import or G@role:so-sensor or G@role:so-standalone'
# ca is applied universally.
ca:
- state: ca
tgt: '*'
# elastalert: eval, standalone, manager, managerhype, managersearch (NOT import).
elastalert:
- state: elastalert
tgt: 'G@role:so-eval or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone'
# elasticsearch: 8 roles.
elasticsearch:
- state: elasticsearch
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-searchnode or G@role:so-standalone'
# elasticagent: so-heavynode only.
elasticagent:
- state: elasticagent
tgt: 'G@role:so-heavynode'
# elasticfleet: base state only on pillar change. elasticfleet.install_agent_grid
# is a deploy/enrollment step, not a config reload; leave it to the next highstate.
elasticfleet:
- state: elasticfleet
tgt: 'G@role:so-eval or G@role:so-fleet or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone'
# healthcheck: eval, sensor, standalone only.
healthcheck:
- state: healthcheck
tgt: 'G@role:so-eval or G@role:so-sensor or G@role:so-standalone'
# influxdb: manager_roles exactly.
influxdb:
- state: influxdb
tgt: 'G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone'
# kafka: standalone, manager, managerhype, managersearch, searchnode, receiver.
kafka:
- state: kafka
tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode or G@role:so-standalone'
# kibana: manager_roles exactly.
kibana:
- state: kibana
tgt: 'G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone'
# logstash: 8 roles, no eval/import.
logstash:
- state: logstash
tgt: 'G@role:so-fleet or G@role:so-heavynode or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode or G@role:so-standalone'
# nginx: 10 specific roles. NOT receiver, idh, hypervisor, desktop.
nginx:
- state: nginx
tgt: 'G@role:so-eval or G@role:so-fleet or G@role:so-heavynode or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-searchnode or G@role:so-sensor or G@role:so-standalone'
# redis: 6 roles. standalone, manager, managerhype, managersearch, heavynode, receiver.
# (NOT eval, NOT import, NOT searchnode.)
redis:
- state: redis
tgt: 'G@role:so-heavynode or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-receiver or G@role:so-standalone'
# sensoroni: universal.
sensoroni:
- state: sensoroni
tgt: '*'
# soc: manager_roles exactly.
soc:
- state: soc
tgt: 'G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone'
# strelka: sensor-side only on pillar change (sensor_roles). strelka.manager is
# intentionally NOT fired on pillar changes -- YARA rule and strelka config
# pillar changes are consumed by the sensor-side strelka backend, and re-running
# strelka.manager on managers is both unnecessary and disruptive. strelka.manager
# is left to the 2-hour highstate.
strelka:
- state: strelka
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-sensor or G@role:so-standalone'
# suricata: sensor_roles + so-import (5 roles).
suricata:
- state: suricata
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-import or G@role:so-sensor or G@role:so-standalone'
# telegraf: universal.
telegraf:
- state: telegraf
tgt: '*'
# zeek: sensor_roles + so-import (5 roles).
zeek:
- state: zeek
tgt: 'G@role:so-eval or G@role:so-heavynode or G@role:so-import or G@role:so-sensor or G@role:so-standalone'
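
How a changed pillar path turns into actions via this map; a small sketch with an example path (the real lookup lives in reactor/push_pillar.py below):

import yaml

PILLAR_ROOT = '/opt/so/saltstack/local/pillar/'

# Load the allowlist and resolve a changed path to its app's actions.
with open('/opt/so/saltstack/default/salt/reactor/pillar_push_map.yaml') as f:
    push_map = yaml.safe_load(f)

changed = '/opt/so/saltstack/local/pillar/zeek/soc_zeek.sls'  # example path
app = changed[len(PILLAR_ROOT):].split('/', 1)[0]             # -> 'zeek'
for action in push_map.get(app, []):
    print(action['state'], '->', action['tgt'])
# zeek -> the sensor_roles + so-import compound above; apps not in the map
# fall through to the next scheduled highstate.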
+170
@@ -0,0 +1,170 @@
#!py
# Reactor invoked by the inotify beacon on pillar file changes under
# /opt/so/saltstack/local/pillar/.
#
# Two branches:
# A) per-minion override under pillar/minions/<id>.sls or adv_<id>.sls
# -> write an intent that runs state.highstate on just that minion.
# B) shared app pillar (pillar/<app>/...) -> look up <app> in
# pillar_push_map.yaml and write an intent with the entry's actions.
#
# Reactors never dispatch directly. The so-push-drainer schedule picks up
# ready intents, dedupes across pending files, and dispatches orch.push_batch.
# See plan /home/mreeves/.claude/plans/goofy-marinating-hummingbird.md.
import fcntl
import json
import logging
import os
import time
from salt.client import Caller
import yaml
LOG = logging.getLogger(__name__)
PENDING_DIR = '/opt/so/state/push_pending'
LOCK_FILE = os.path.join(PENDING_DIR, '.lock')
MAX_PATHS = 20
PILLAR_ROOT = '/opt/so/saltstack/local/pillar/'
MINIONS_PREFIX = PILLAR_ROOT + 'minions/'
# The pillar_push_map.yaml is shipped via salt:// but the reactor runs on the
# master, which mounts the default saltstack tree at this path.
PUSH_MAP_PATH = '/opt/so/saltstack/default/salt/reactor/pillar_push_map.yaml'
_PUSH_MAP_CACHE = {'mtime': 0, 'data': None}
def _load_push_map():
try:
st = os.stat(PUSH_MAP_PATH)
except OSError:
LOG.warning('push_pillar: %s not found', PUSH_MAP_PATH)
return {}
if _PUSH_MAP_CACHE['mtime'] != st.st_mtime:
try:
with open(PUSH_MAP_PATH, 'r') as f:
_PUSH_MAP_CACHE['data'] = yaml.safe_load(f) or {}
except Exception:
LOG.exception('push_pillar: failed to load %s', PUSH_MAP_PATH)
_PUSH_MAP_CACHE['data'] = {}
_PUSH_MAP_CACHE['mtime'] = st.st_mtime
return _PUSH_MAP_CACHE['data'] or {}
def _push_enabled():
try:
caller = Caller()
return bool(caller.cmd('pillar.get', 'global:push:enabled', True))
except Exception:
LOG.exception('push_pillar: pillar.get global:push:enabled failed, assuming enabled')
return True
def _write_intent(key, actions, path):
now = time.time()
try:
os.makedirs(PENDING_DIR, exist_ok=True)
except OSError:
LOG.exception('push_pillar: cannot create %s', PENDING_DIR)
return
intent_path = os.path.join(PENDING_DIR, '{}.json'.format(key))
lock_fd = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR, 0o644)
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX)
intent = {}
if os.path.exists(intent_path):
try:
with open(intent_path, 'r') as f:
intent = json.load(f)
except (IOError, ValueError):
intent = {}
intent.setdefault('first_touch', now)
intent['last_touch'] = now
intent['actions'] = actions
paths = intent.get('paths', [])
if path and path not in paths:
paths.append(path)
paths = paths[-MAX_PATHS:]
intent['paths'] = paths
tmp_path = intent_path + '.tmp'
with open(tmp_path, 'w') as f:
json.dump(intent, f)
os.rename(tmp_path, intent_path)
except Exception:
LOG.exception('push_pillar: failed to write intent %s', intent_path)
finally:
try:
fcntl.flock(lock_fd, fcntl.LOCK_UN)
finally:
os.close(lock_fd)
def _minion_id_from_path(path):
# path is e.g. /opt/so/saltstack/local/pillar/minions/sensor1.sls
# or /opt/so/saltstack/local/pillar/minions/adv_sensor1.sls
filename = os.path.basename(path)
if not filename.endswith('.sls'):
return None
stem = filename[:-4]
if stem.startswith('adv_'):
stem = stem[4:]
return stem or None
def _app_from_path(path):
# path is e.g. /opt/so/saltstack/local/pillar/zeek/soc_zeek.sls -> 'zeek'
remainder = path[len(PILLAR_ROOT):]
if '/' not in remainder:
return None
return remainder.split('/', 1)[0] or None
def run():
if not _push_enabled():
LOG.info('push_pillar: push disabled, skipping')
return {}
path = data.get('path', '') # noqa: F821 -- data provided by reactor
if not path or not path.startswith(PILLAR_ROOT):
LOG.debug('push_pillar: ignoring path outside pillar root: %s', path)
return {}
# Branch A: per-minion override
if path.startswith(MINIONS_PREFIX):
minion_id = _minion_id_from_path(path)
if not minion_id:
LOG.debug('push_pillar: ignoring non-sls path under minions/: %s', path)
return {}
actions = [{'highstate': True, 'tgt': minion_id, 'tgt_type': 'glob'}]
_write_intent('minion_{}'.format(minion_id), actions, path)
LOG.info('push_pillar: per-minion intent updated for %s (path=%s)', minion_id, path)
return {}
# Branch B: shared app pillar -> allowlist lookup
app = _app_from_path(path)
if not app:
LOG.debug('push_pillar: ignoring path with no app segment: %s', path)
return {}
push_map = _load_push_map()
entry = push_map.get(app)
if not entry:
LOG.warning(
'push_pillar: pillar dir "%s" is not in pillar_push_map.yaml; '
'change will be picked up at the next scheduled highstate (path=%s)',
app, path,
)
return {}
actions = list(entry) # copy to avoid mutating the cache
_write_intent('pillar_{}'.format(app), actions, path)
LOG.info('push_pillar: app intent updated for %s (path=%s)', app, path)
return {}
+96
@@ -0,0 +1,96 @@
#!py
# Reactor invoked by the inotify beacon on rule file changes under
# /opt/so/saltstack/local/salt/strelka/rules/compiled/.
#
# Writes (or updates) a push intent at /opt/so/state/push_pending/rules_strelka.json
# and returns {}. The so-push-drainer schedule picks up ready intents, dedupes
# across pending files, and dispatches orch.push_batch. Reactors never dispatch
# directly -- see plan /home/mreeves/.claude/plans/goofy-marinating-hummingbird.md.
import fcntl
import json
import logging
import os
import time
from salt.client import Caller
LOG = logging.getLogger(__name__)
PENDING_DIR = '/opt/so/state/push_pending'
LOCK_FILE = os.path.join(PENDING_DIR, '.lock')
MAX_PATHS = 20
# Mirrors GLOBALS.sensor_roles in salt/vars/globals.map.jinja. Sensor-side
# strelka runs on exactly these four roles; so-import gets strelka.manager
# instead, which is not fired on pillar changes.
SENSOR_ROLES = ['so-eval', 'so-heavynode', 'so-sensor', 'so-standalone']
def _sensor_compound():
return ' or '.join('G@role:{}'.format(r) for r in SENSOR_ROLES)
def _push_enabled():
try:
caller = Caller()
return bool(caller.cmd('pillar.get', 'global:push:enabled', True))
except Exception:
LOG.exception('push_strelka: pillar.get global:push:enabled failed, assuming enabled')
return True
def _write_intent(key, actions, path):
now = time.time()
try:
os.makedirs(PENDING_DIR, exist_ok=True)
except OSError:
LOG.exception('push_strelka: cannot create %s', PENDING_DIR)
return
intent_path = os.path.join(PENDING_DIR, '{}.json'.format(key))
lock_fd = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR, 0o644)
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX)
intent = {}
if os.path.exists(intent_path):
try:
with open(intent_path, 'r') as f:
intent = json.load(f)
except (IOError, ValueError):
intent = {}
intent.setdefault('first_touch', now)
intent['last_touch'] = now
intent['actions'] = actions
paths = intent.get('paths', [])
if path and path not in paths:
paths.append(path)
paths = paths[-MAX_PATHS:]
intent['paths'] = paths
tmp_path = intent_path + '.tmp'
with open(tmp_path, 'w') as f:
json.dump(intent, f)
os.rename(tmp_path, intent_path)
except Exception:
LOG.exception('push_strelka: failed to write intent %s', intent_path)
finally:
try:
fcntl.flock(lock_fd, fcntl.LOCK_UN)
finally:
os.close(lock_fd)
def run():
if not _push_enabled():
LOG.info('push_strelka: push disabled, skipping')
return {}
path = data.get('path', '') # noqa: F821 -- data provided by reactor
actions = [{'state': 'strelka', 'tgt': _sensor_compound()}]
_write_intent('rules_strelka', actions, path)
LOG.info('push_strelka: intent updated for path=%s', path)
return {}
+95
@@ -0,0 +1,95 @@
#!py
# Reactor invoked by the inotify beacon on rule file changes under
# /opt/so/saltstack/local/salt/suricata/rules/.
#
# Writes (or updates) a push intent at /opt/so/state/push_pending/rules_suricata.json
# and returns {}. The so-push-drainer schedule picks up ready intents, dedupes
# across pending files, and dispatches orch.push_batch. Reactors never dispatch
# directly -- see plan /home/mreeves/.claude/plans/goofy-marinating-hummingbird.md.
import fcntl
import json
import logging
import os
import time
from salt.client import Caller
LOG = logging.getLogger(__name__)
PENDING_DIR = '/opt/so/state/push_pending'
LOCK_FILE = os.path.join(PENDING_DIR, '.lock')
MAX_PATHS = 20
# Mirrors GLOBALS.sensor_roles in salt/vars/globals.map.jinja. Suricata also
# runs on so-import per salt/top.sls, so that role is appended below.
SENSOR_ROLES = ['so-eval', 'so-heavynode', 'so-sensor', 'so-standalone']
def _sensor_compound_plus_import():
return ' or '.join('G@role:{}'.format(r) for r in SENSOR_ROLES) + ' or G@role:so-import'
def _push_enabled():
try:
caller = Caller()
return bool(caller.cmd('pillar.get', 'global:push:enabled', True))
except Exception:
LOG.exception('push_suricata: pillar.get global:push:enabled failed, assuming enabled')
return True
def _write_intent(key, actions, path):
now = time.time()
try:
os.makedirs(PENDING_DIR, exist_ok=True)
except OSError:
LOG.exception('push_suricata: cannot create %s', PENDING_DIR)
return
intent_path = os.path.join(PENDING_DIR, '{}.json'.format(key))
lock_fd = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR, 0o644)
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX)
intent = {}
if os.path.exists(intent_path):
try:
with open(intent_path, 'r') as f:
intent = json.load(f)
except (IOError, ValueError):
intent = {}
intent.setdefault('first_touch', now)
intent['last_touch'] = now
intent['actions'] = actions
paths = intent.get('paths', [])
if path and path not in paths:
paths.append(path)
paths = paths[-MAX_PATHS:]
intent['paths'] = paths
tmp_path = intent_path + '.tmp'
with open(tmp_path, 'w') as f:
json.dump(intent, f)
os.rename(tmp_path, intent_path)
except Exception:
LOG.exception('push_suricata: failed to write intent %s', intent_path)
finally:
try:
fcntl.flock(lock_fd, fcntl.LOCK_UN)
finally:
os.close(lock_fd)
def run():
if not _push_enabled():
LOG.info('push_suricata: push disabled, skipping')
return {}
path = data.get('path', '') # noqa: F821 -- data provided by reactor
actions = [{'state': 'suricata', 'tgt': _sensor_compound_plus_import()}]
_write_intent('rules_suricata', actions, path)
LOG.info('push_suricata: intent updated for path=%s', path)
return {}
+1
@@ -17,6 +17,7 @@ include:
so-redis:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: so-redis
    - user: socore
    - networks:
+3
@@ -21,6 +21,9 @@ so-dockerregistry:
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKERMERGED.containers['so-dockerregistry'].ip }}
+   # Intentionally `always` (not unless-stopped) -- registry is critical infra
+   # and must come back up even if it was manually stopped. Do not homogenize
+   # to unless-stopped; see the container auto-restart section of the plan.
    - restart_policy: always
    - port_bindings:
    {% for BINDING in DOCKERMERGED.containers['so-dockerregistry'].port_bindings %}
+4 -3
@@ -3,7 +3,7 @@
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %}
include:
-  - salt
+  - salt.minion
{% if CHECKS and ENABLED %}
salt_beacons:
@@ -14,12 +14,13 @@ salt_beacons:
    - defaults:
        CHECKS: {{ CHECKS }}
        SCHEDULE: {{ SCHEDULE }}
    - watch_in:
      - service: salt_minion_service
{% else %}
salt_beacons:
  file.absent:
    - name: /etc/salt/minion.d/beacons.conf
    - watch_in:
      - service: salt_minion_service
{% endif %}
+13
@@ -0,0 +1,13 @@
reactor:
- 'salt/beacon/*/inotify//opt/so/saltstack/local/salt/suricata/rules':
- salt://reactor/push_suricata.sls
- 'salt/beacon/*/inotify//opt/so/saltstack/local/salt/suricata/rules/*':
- salt://reactor/push_suricata.sls
- 'salt/beacon/*/inotify//opt/so/saltstack/local/salt/strelka/rules/compiled':
- salt://reactor/push_strelka.sls
- 'salt/beacon/*/inotify//opt/so/saltstack/local/salt/strelka/rules/compiled/*':
- salt://reactor/push_strelka.sls
- 'salt/beacon/*/inotify//opt/so/saltstack/local/pillar':
- salt://reactor/push_pillar.sls
- 'salt/beacon/*/inotify//opt/so/saltstack/local/pillar/*':
- salt://reactor/push_pillar.sls
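
The keys above are glob patterns over Salt event tags; an illustrative check that a nested pillar edit matches the pillar entries (minion id and filename are made up, and fnmatch is used here on the assumption that Salt's reactor matches tags with fnmatch-style globbing):

from fnmatch import fnmatch

tag = ('salt/beacon/manager_standalone/inotify'
       '//opt/so/saltstack/local/pillar/zeek/soc_zeek.sls')

patterns = [
    'salt/beacon/*/inotify//opt/so/saltstack/local/pillar',
    'salt/beacon/*/inotify//opt/so/saltstack/local/pillar/*',
]
for pat in patterns:
    print(pat, '->', fnmatch(tag, pat))
# Only the second pattern matches this tag: fnmatch's '*' spans '/' too,
# which is why both the directory and the wildcard form are registered above.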
+18
@@ -10,10 +10,12 @@
# software that is protected by the license key."
{% from 'allowed_states.map.jinja' import allowed_states %}
+{% from 'global/map.jinja' import GLOBALMERGED %}
{% if sls in allowed_states %}
include:
  - salt.minion
+ - salt.master.pyinotify
{% if 'vrt' in salt['pillar.get']('features', []) %}
  - salt.cloud
  - salt.cloud.reactor_config_hypervisor
@@ -62,6 +64,22 @@ engines_config:
    - name: /etc/salt/master.d/engines.conf
    - source: salt://salt/files/engines.conf
+{% if GLOBALMERGED.push.enabled %}
+reactor_pushstate_config:
+  file.managed:
+    - name: /etc/salt/master.d/reactor_pushstate.conf
+    - source: salt://salt/files/reactor_pushstate.conf
+    - watch_in:
+      - service: salt_master_service
+    - order: last
+{% else %}
+reactor_pushstate_config:
+  file.absent:
+    - name: /etc/salt/master.d/reactor_pushstate.conf
+    - watch_in:
+      - service: salt_master_service
+{% endif %}
# update the bootstrap script when used for salt-cloud
salt_bootstrap_cloud:
  file.managed:
+20
@@ -0,0 +1,20 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
pyinotify_module_package:
file.recurse:
- name: /opt/so/conf/salt/module_packages/pyinotify
- source: salt://salt/module_packages/pyinotify
- clean: True
- makedirs: True
pyinotify_python_module_install:
cmd.run:
- name: /opt/saltstack/salt/bin/python3.10 -m pip install pyinotify --no-index --find-links=/opt/so/conf/salt/module_packages/pyinotify/ --upgrade
- onchanges:
- file: pyinotify_module_package
- failhard: True
- watch_in:
- service: salt_minion_service
-1
@@ -2,4 +2,3 @@
salt:
  minion:
    version: '3006.19'
-   check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
+19 -3
@@ -1,10 +1,26 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'global/map.jinja' import GLOBALMERGED %}
highstate_schedule:
  schedule.present:
    - function: state.highstate
-   - minutes: 15
+   - hours: {{ GLOBALMERGED.push.highstate_interval_hours }}
    - maxrunning: 1
{% if not GLOBALS.is_manager %}
-   - splay: 120
+   - splay: 1800
+{% endif %}
+{% if GLOBALS.is_manager and GLOBALMERGED.push.enabled %}
+push_drain_schedule:
+  schedule.present:
+    - function: cmd.run
+    - job_args:
+      - /usr/sbin/so-push-drainer
+    - seconds: {{ GLOBALMERGED.push.drain_interval }}
+    - maxrunning: 1
+    - return_job: False
+{% elif GLOBALS.is_manager %}
+push_drain_schedule:
+  schedule.absent:
+    - name: push_drain_schedule
{% endif %}
+1
@@ -14,6 +14,7 @@ include:
so-sensoroni:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - network_mode: host
    - binds:
      - /nsm/import:/nsm/import:rw
+1
@@ -18,6 +18,7 @@ include:
so-soc:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
+   - restart_policy: unless-stopped
    - hostname: soc
    - name: so-soc
    - networks:
+4
View File
@@ -47,6 +47,10 @@ strelka_backend:
       - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
 {% endfor %}
 {% endif %}
+    # Intentionally `on-failure` rather than `unless-stopped`: the Strelka
+    # backend shuts down cleanly during rule reloads, and a clean exit must
+    # not trigger an automatic restart, so this container deliberately does
+    # not share the restart policy used by the other containers.
     - restart_policy: on-failure
     - watch:
       - file: strelkasensorcompiledrules
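
With most containers moving to unless-stopped while the Strelka backend stays on on-failure, a quick audit of a running grid can confirm the intended mix. A minimal sketch using the plain Docker CLI; the so-* name filter is an assumption about the local container names:

    # Print each container's restart policy; the so-* containers are expected
    # to show unless-stopped except the Strelka backend (on-failure).
    for c in $(docker ps --format '{{.Names}}' | grep '^so-'); do
        printf '%-30s %s\n' "$c" "$(docker inspect -f '{{.HostConfig.RestartPolicy.Name}}' "$c")"
    done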
+1
@@ -15,6 +15,7 @@ include:
 strelka_coordinator:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - name: so-strelka-coordinator
     - networks:
       - sobridge:
+1
@@ -15,6 +15,7 @@ include:
 strelka_filestream:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - binds:
       - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
       - /nsm/strelka:/nsm/strelka
+1
@@ -15,6 +15,7 @@ include:
 strelka_frontend:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - binds:
       - /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
       - /nsm/strelka/log/:/var/log/strelka/:rw
+1
@@ -15,6 +15,7 @@ include:
 strelka_gatekeeper:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - name: so-strelka-gatekeeper
     - networks:
       - sobridge:
+1
@@ -15,6 +15,7 @@ include:
 strelka_manager:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - binds:
       - /opt/so/conf/strelka/manager/:/etc/strelka/:ro
 {% if DOCKERMERGED.containers['so-strelka-manager'].custom_bind_mounts %}
+1
@@ -18,6 +18,7 @@ so-suricata:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-suricata:{{ GLOBALS.so_version }}
     - privileged: True
+    - restart_policy: unless-stopped
     - environment:
       - INTERFACE={{ GLOBALS.sensor.interface }}
 {% if DOCKERMERGED.containers['so-suricata'].extra_env %}
+1
@@ -7,6 +7,7 @@ so-tcpreplay:
   docker_container.running:
     - network_mode: "host"
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-tcpreplay:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - name: so-tcpreplay
     - user: root
     - interactive: True
+1
@@ -18,6 +18,7 @@ include:
 so-telegraf:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-telegraf:{{ GLOBALS.so_version }}
+    - restart_policy: unless-stopped
     - user: 939
     - group_add: 939,920
     - environment:
+1
@@ -18,6 +18,7 @@ so-zeek:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-zeek:{{ GLOBALS.so_version }}
     - start: True
     - privileged: True
+    - restart_policy: unless-stopped
 {% if DOCKERMERGED.containers['so-zeek'].ulimits %}
     - ulimits:
 {% for ULIMIT in DOCKERMERGED.containers['so-zeek'].ulimits %}
+79 -2
@@ -745,6 +745,56 @@ configure_network_sensor() {
     return $err
 }
+
+configure_management_bond() {
+    local bond_name="bond1"
+    local bond_mode=${MBOND_MODE:-active-backup}
+
+    info "Setting up $bond_name management interface with mode $bond_mode"
+
+    if [[ ${#MBNICS[@]} -eq 0 ]]; then
+        error "[ERROR] No management bond NICs were selected."
+        fail_setup
+    fi
+
+    nmcli -t -f NAME con show | grep -Fxq "$bond_name"
+    local found_int=$?
+    if [[ $found_int != 0 ]]; then
+        nmcli con add type bond ifname "$bond_name" con-name "$bond_name" mode "$bond_mode" -- \
+            ipv6.method ignore \
+            connection.autoconnect yes >> "$setup_log" 2>&1
+    else
+        nmcli con mod "$bond_name" \
+            bond.options "mode=$bond_mode" \
+            ipv6.method ignore \
+            connection.autoconnect yes >> "$setup_log" 2>&1
+    fi
+
+    local err=0
+    for MBNIC in "${MBNICS[@]}"; do
+        local slave_name="$bond_name-slave-$MBNIC"
+        nmcli -t -f NAME con show | grep -Fxq "$slave_name"
+        found_int=$?
+        if [[ $found_int != 0 ]]; then
+            nmcli con add type ethernet ifname "$MBNIC" con-name "$slave_name" master "$bond_name" -- \
+                connection.autoconnect yes >> "$setup_log" 2>&1
+        else
+            nmcli con mod "$slave_name" \
+                connection.master "$bond_name" \
+                connection.slave-type bond \
+                connection.autoconnect yes >> "$setup_log" 2>&1
+        fi
+        nmcli con up "$slave_name" >> "$setup_log" 2>&1
+        local ret=$?
+        [[ $ret -eq 0 ]] || err=$ret
+    done
+
+    return $err
+}

 configure_hyper_bridge() {
     info "Setting up hypervisor bridge"
     info "Checking $MNIC ipv4.method is auto or manual"
@@ -999,6 +1049,11 @@ filter_unused_nics() {
grep_string="$grep_string\|$BONDNIC" grep_string="$grep_string\|$BONDNIC"
done done
fi fi
if [[ $MBNICS ]]; then
for BONDNIC in "${MBNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use) # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g') filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')
@@ -1388,7 +1443,7 @@ network_init() {
title "Initializing Network" title "Initializing Network"
disable_ipv6 disable_ipv6
set_hostname set_hostname
if [[ ( $is_iso || $is_desktop_iso ) ]]; then if [[ $is_iso || $is_desktop_iso ]]; then
set_management_interface set_management_interface
fi fi
} }
@@ -1701,6 +1756,24 @@ remove_package() {
     fi
 }
+
+ensure_pyyaml() {
+    title "Ensuring python3-pyyaml is installed"
+    if rpm -q python3-pyyaml >/dev/null 2>&1; then
+        info "python3-pyyaml already installed"
+        return 0
+    fi
+
+    info "python3-pyyaml not found, attempting to install"
+    set -o pipefail
+    dnf -y install python3-pyyaml 2>&1 | tee -a "$setup_log"
+    local result=$?
+    set +o pipefail
+
+    if [[ $result -ne 0 ]] || ! rpm -q python3-pyyaml >/dev/null 2>&1; then
+        error "Failed to install python3-pyyaml (exit=$result)"
+        fail_setup
+    fi
+
+    info "python3-pyyaml installed successfully"
+}
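
rpm -q only proves the package is registered; importing the module proves it is usable. A minimal follow-up sketch (this extra check is a suggestion, not part of the setup script):

    # Catch a broken or partially installed PyYAML that rpm would still report.
    python3 -c 'import yaml; print("PyYAML", yaml.__version__)' \
        || echo "python3-pyyaml installed but not importable"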
+# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml
 # CAUTION! SALT VERSION UPDATES - READ BELOW
 # When updating the salt version, also update the version in:
@@ -2084,8 +2157,12 @@ set_initial_firewall_access() {
 # Set up the management interface on the ISO
 set_management_interface() {
     title "Setting up the main interface"
+    if [[ $MNIC == "bond1" ]]; then
+        configure_management_bond || fail_setup
+    fi
     if [ "$address_type" = 'DHCP' ]; then
-        logCmd "nmcli con mod $MNIC connection.autoconnect yes"
+        logCmd "nmcli con mod $MNIC connection.autoconnect yes ipv4.method auto"
         logCmd "nmcli con up $MNIC"
         logCmd "nmcli -p connection show $MNIC"
     else
+3
@@ -66,6 +66,9 @@ set_timezone
 # Let's see what OS we are dealing with here
 detect_os
+
+# Ensure python3-pyyaml is available before any code that may need so-yaml/PyYAML
+ensure_pyyaml
 # Check to see if this is the setup type of "desktop".
 is_desktop=
+83 -2
@@ -845,18 +845,99 @@ whiptail_management_nic() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
filter_unused_nics filter_unused_nics
local management_nic_options=( "${nic_list_management[@]}" )
if [[ $is_iso || $is_desktop_iso ]]; then
management_nic_options+=( "BOND" "Configure a bonded management interface" )
fi
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 20 75 12 "${nic_list_management[@]}" 3>&1 1>&2 2>&3 ) MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 20 75 12 "${management_nic_options[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus
while [ -z "$MNIC" ] while [ -z "$MNIC" ]
do do
MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 22 75 12 "${nic_list_management[@]}" 3>&1 1>&2 2>&3 ) MNIC=$(whiptail --title "$whiptail_title" --menu "Please select the NIC you would like to use for management.\n\nUse the arrow keys to move around and the Enter key to select." 22 75 12 "${management_nic_options[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus
done done
if [[ $MNIC == "BOND" ]]; then
whiptail_management_bond
fi
}
whiptail_management_bond() {
[ -n "$TESTING" ] && return
MBOND_MODE=$(whiptail --title "$whiptail_title" --menu \
"Choose the bond mode for the management interface.\n\nThe management bond will be created as bond1." 20 75 7 \
"active-backup" "One active NIC with failover (recommended)" \
"balance-rr" "Round-robin transmit policy" \
"balance-xor" "Transmit based on selected hash policy" \
"broadcast" "Transmit everything on all slave interfaces" \
"802.3ad" "Dynamic link aggregation (requires switch support)" \
"balance-tlb" "Adaptive transmit load balancing" \
"balance-alb" "Adaptive load balancing" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$MBOND_MODE" ]
do
MBOND_MODE=$(whiptail --title "$whiptail_title" --menu \
"Choose the bond mode for the management interface.\n\nThe management bond will be created as bond1." 20 75 7 \
"active-backup" "One active NIC with failover (recommended)" \
"balance-rr" "Round-robin transmit policy" \
"balance-xor" "Transmit based on selected hash policy" \
"broadcast" "Transmit everything on all slave interfaces" \
"802.3ad" "Dynamic link aggregation (requires switch support)" \
"balance-tlb" "Adaptive transmit load balancing" \
"balance-alb" "Adaptive load balancing" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
whiptail_management_bond_nics
MNIC="bond1"
export MBOND_MODE MNIC
}
whiptail_management_bond_nics() {
[ -n "$TESTING" ] && return
MBNICS=()
filter_unused_nics
MBNICS=$(whiptail --title "$whiptail_title" --checklist "Please add NICs to the Management Interface:" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$MBNICS" ]
do
MBNICS=$(whiptail --title "$whiptail_title" --checklist "Please add NICs to the Management Interface:" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
MBNICS=$(echo "$MBNICS" | tr -d '"')
IFS=' ' read -ra MBNICS <<< "$MBNICS"
for bond_nic in "${MBNICS[@]}"; do
for dev_status in "${nmcli_dev_status_list[@]}"; do
if [[ $dev_status == "${bond_nic}:unmanaged" ]]; then
whiptail \
--title "$whiptail_title" \
--msgbox "$bond_nic is unmanaged by Network Manager. Please remove it from other network management tools then re-run setup." \
8 75
exit
fi
done
done
export MBNICS
} }
whiptail_net_method() { whiptail_net_method() {
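
whiptail --checklist returns a single line of double-quoted, space-separated tags, which whiptail_management_bond_nics flattens into a bash array before validating each NIC. A standalone sketch of just that parsing step, with a hard-coded sample standing in for a real whiptail run:

    # Simulated checklist output: '"eth1" "eth2"'
    selection='"eth1" "eth2"'
    selection=$(tr -d '"' <<< "$selection")    # -> eth1 eth2
    IFS=' ' read -ra MBNICS <<< "$selection"   # -> MBNICS=(eth1 eth2)
    printf 'selected NIC: %s\n' "${MBNICS[@]}"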