Compare commits

1 Commit

Author: reyesj2
SHA1: 452d864b88
Message: WIP
Date: 2026-01-07 14:03:19 -06:00
266 changed files with 1814 additions and 2906 deletions

View File

@@ -33,7 +33,6 @@ body:
- 2.4.180
- 2.4.190
- 2.4.200
- 2.4.201
- 2.4.210
- Other (please provide detail below)
validations:

View File

@@ -1,17 +1,17 @@
### 2.4.201-20260114 ISO image released on 2026/1/15
### 2.4.200-20251216 ISO image released on 2025/12/16
### Download and Verify
2.4.201-20260114 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
2.4.200-20251216 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
MD5: 20E926E433203798512EF46E590C89B9
SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA
SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40
MD5: 07B38499952D1F2FD7B5AF10096D0043
SHA1: 7F3A26839CA3CAEC2D90BB73D229D55E04C7D370
SHA256: 8D3AC735873A2EA8527E16A6A08C34BD5018CBC0925AC4096E15A0C99F591D5F
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso
gpg --verify securityonion-2.4.200-20251216.iso.sig securityonion-2.4.200-20251216.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013
gpg: Signature made Mon 15 Dec 2025 05:24:11 PM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
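
For scripted verification, the manual steps above can be combined; a minimal bash sketch, assuming the 2.4.200-20251216 release named in these notes:

```
# Minimal sketch of the download-and-verify flow documented above.
# VERSION matches these release notes; adjust for other releases.
VERSION="2.4.200-20251216"
ISO="securityonion-${VERSION}.iso"

wget "https://download.securityonion.net/file/securityonion/${ISO}"
wget "https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/${ISO}.sig"
wget "https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS"

gpg --import KEYS
# gpg exits non-zero on a bad signature, so this check is enough for
# automation; still eyeball the fingerprint on first use.
gpg --verify "${ISO}.sig" "${ISO}" || { echo "signature verification FAILED" >&2; exit 1; }
```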

View File

@@ -1 +1 @@
2.4.210
2.4.0-foxtrot

View File

@@ -1,2 +0,0 @@
ca:
server:

View File

@@ -1,6 +1,5 @@
base:
'*':
- ca
- global.soc_global
- global.adv_global
- docker.soc_docker

View File

@@ -15,7 +15,11 @@
'salt.minion-check',
'sensoroni',
'salt.lasthighstate',
'salt.minion',
'salt.minion'
] %}
{% set ssl_states = [
'ssl',
'telegraf',
'firewall',
'schedule',
@@ -24,7 +28,7 @@
{% set manager_states = [
'salt.master',
'ca.server',
'ca',
'registry',
'manager',
'nginx',
@@ -71,24 +75,28 @@
{# Map role-specific states #}
{% set role_states = {
'so-eval': (
ssl_states +
manager_states +
sensor_states +
elastic_stack_states | reject('equalto', 'logstash') | list +
['logstash.ssl']
elastic_stack_states | reject('equalto', 'logstash') | list
),
'so-heavynode': (
ssl_states +
sensor_states +
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
),
'so-idh': (
ssl_states +
['idh']
),
'so-import': (
ssl_states +
manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager']
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
),
'so-manager': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states +
@@ -96,6 +104,7 @@
elastic_stack_states
),
'so-managerhype': (
ssl_states +
manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
stig_states +
@@ -103,6 +112,7 @@
elastic_stack_states
),
'so-managersearch': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states +
@@ -110,10 +120,12 @@
elastic_stack_states
),
'so-searchnode': (
ssl_states +
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
stig_states
),
'so-standalone': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
sensor_states +
@@ -122,24 +134,29 @@
elastic_stack_states
),
'so-sensor': (
ssl_states +
sensor_states +
['nginx'] +
stig_states
),
'so-fleet': (
ssl_states +
stig_states +
['logstash', 'nginx', 'healthcheck', 'elasticfleet']
),
'so-receiver': (
ssl_states +
kafka_states +
stig_states +
['logstash', 'redis']
),
'so-hypervisor': (
ssl_states +
stig_states +
['hypervisor', 'libvirt']
),
'so-desktop': (
['ssl', 'docker_clean', 'telegraf'] +
stig_states
)
} %}
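
Since every role above draws from the same shared state lists (ssl_states, manager_states, sensor_states, and so on), it can be useful to render this map on a live node and see exactly which states its role may apply. A hedged sketch using Salt's stock slsutil module; the template path is assumed from the imports seen elsewhere in this diff:

```
# Render the allowed-states map for the local minion and print the result.
# slsutil.renderer is a standard Salt execution module.
salt-call slsutil.renderer salt://allowed_states.map.jinja
```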

View File

@@ -13,7 +13,7 @@
{% endif %}
{% if PCAPBPF %}
{% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
{% if PCAP_BPF_CALC['retcode'] == 0 %}
{% set PCAP_BPF_STATUS = 1 %}
{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
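
The change above swaps cmd.script (which ships the script from the fileserver on every render) for cmd.run_all against the installed /usr/sbin/so-bpf-compile. Both return a results dict, so the retcode/stdout checks that follow keep working. A hedged sketch of the same call from a shell, with a placeholder interface and filter:

```
# Reproduce the template's call by hand; eth0 and the BPF text are placeholders.
salt-call cmd.run_all '/usr/sbin/so-bpf-compile eth0 "host 10.0.0.1"' cwd=/root
# The template tests ret['retcode'] == 0 and embeds ret['stdout'] as the
# compiled --filter argument for stenographer.
```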

View File

@@ -9,7 +9,7 @@
{% set SURICATABPF = BPFMERGED.suricata %}
{% if SURICATABPF %}
{% set SURICATA_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
{% if SURICATA_BPF_CALC['retcode'] == 0 %}
{% set SURICATA_BPF_STATUS = 1 %}
{% endif %}

View File

@@ -9,7 +9,7 @@
{% set ZEEKBPF = BPFMERGED.zeek %}
{% if ZEEKBPF %}
{% set ZEEK_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
{% if ZEEK_BPF_CALC['retcode'] == 0 %}
{% set ZEEK_BPF_STATUS = 1 %}
{% endif %}

salt/ca/dirs.sls Normal file
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True

View File

@@ -3,10 +3,70 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
{% if GLOBALS.is_manager %}
- ca.server
- ca.dirs
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 0
- backup: True
- replace: False
- require:
- sls: ca.dirs
- timeout: 30
- retry:
attempts: 5
interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
- ca.trustca
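
Once this consolidated state runs on the manager, the resulting CA can be checked against the parameters declared above (CN of the manager, critical CA basic constraint, ten-year validity). A hedged sketch with a reasonably current openssl:

```
# Inspect the CA produced by pki_public_ca_crt.
openssl x509 -in /etc/pki/ca.crt -noout -subject -dates -ext basicConstraints

# Confirm /etc/pki/ca.key is the matching private key by comparing public keys.
diff <(openssl x509 -in /etc/pki/ca.crt -noout -pubkey) \
     <(openssl rsa -in /etc/pki/ca.key -pubout 2>/dev/null) \
  && echo "CA key and certificate match"
```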

View File

@@ -1,3 +0,0 @@
{% set CA = {
'server': pillar.ca.server
}%}

View File

@@ -1,35 +1,7 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
{% if setup_running%}
include:
- ssl.remove
remove_pki_private_key:
pki_private_key:
file.absent:
- name: /etc/pki/ca.key
remove_pki_public_ca_crt:
pki_public_ca_crt:
file.absent:
- name: /etc/pki/ca.crt
remove_trusttheca:
file.absent:
- name: /etc/pki/tls/certs/intca.crt
remove_pki_public_ca_crt_symlink:
file.absent:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
{% else %}
so-setup_not_running:
test.show_notification:
- text: "This state is reserved for usage during so-setup."
{% endif %}

View File

@@ -1,63 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 7
- backup: True
- replace: False
- timeout: 30
- retry:
attempts: 5
interval: 30
pki_public_ca_crt_symlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
- target: /etc/pki/ca.crt
- require:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,15 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# when the salt-minion signs the cert, a copy is stored here
issued_certs_copypath:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
signing_policy:
file.managed:
- name: /etc/salt/minion.d/signing_policies.conf
- source: salt://ca/files/signing_policies.conf

View File

@@ -1,26 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- docker
# Trust the CA
trusttheca:
file.managed:
- name: /etc/pki/tls/certs/intca.crt
- source: salt://ca/files/ca.crt
- watch_in:
- service: docker_running
- show_changes: False
- makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}
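
With the CA distributed to /etc/pki/tls/certs/intca.crt, any grid-issued certificate can be validated against it; a hedged one-liner, where the leaf-certificate path is a placeholder:

```
# Verify a grid-issued certificate chains to the distributed CA.
# /etc/pki/elasticfleet.crt is a placeholder; substitute any grid-signed cert.
openssl verify -CAfile /etc/pki/tls/certs/intca.crt /etc/pki/elasticfleet.crt
```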

View File

@@ -177,7 +177,7 @@ so-status_script:
- source: salt://common/tools/sbin/so-status
- mode: 755
{% if GLOBALS.is_sensor %}
{% if GLOBALS.role in GLOBALS.sensor_roles %}
# Add sensor cleanup
so-sensor-clean:
cron.present:

View File

@@ -404,25 +404,6 @@ is_single_node_grid() {
grep "role: so-" /etc/salt/grains | grep -E "eval|standalone|import" &> /dev/null
}
initialize_elasticsearch_indices() {
local index_names=$1
local default_entry=${2:-'{"@timestamp":"0"}'}
for idx in $index_names; do
if ! so-elasticsearch-query "$idx" --fail --retry 3 --retry-delay 30 >/dev/null 2>&1; then
echo "Index does not already exist. Initializing $idx index."
if retry 3 10 "so-elasticsearch-query "$idx/_doc" -d '$default_entry' -XPOST --fail 2>/dev/null" '"successful":1'; then
echo "Successfully initialized $idx index."
else
echo "Failed to initialize $idx index after 3 attempts."
fi
else
echo "Index $idx already exists. No action needed."
fi
done
}
lookup_bond_interfaces() {
cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g"
}
@@ -573,39 +554,21 @@ run_check_net_err() {
}
wait_for_salt_minion() {
local minion="$1"
local max_wait="${2:-30}"
local interval="${3:-2}"
local logfile="${4:-'/dev/stdout'}"
local elapsed=0
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..."
while [ $elapsed -lt $max_wait ]; do
# Check if service is running
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
if ! systemctl is-active --quiet salt-minion; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
sleep $interval
elapsed=$((elapsed + interval))
continue
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
# Check if minion responds to ping
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
return 0
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
sleep $interval
elapsed=$((elapsed + interval))
done
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
return 1
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-'/dev/stdout'}"
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local attempt=0
# each attempt takes about 15 seconds
local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
return 1
fi
sleep 10
done
return 0
}
salt_minion_count() {
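
The rewritten wait_for_salt_minion now leans on the shared retry and check_salt_minion_status helpers, giving roughly 20 attempts at about 15 seconds each on top of the journal readiness check. A hedged usage sketch from setup code; $MINION_ID and the log path are placeholders:

```
# Block until the minion answers, logging progress to the setup log.
wait_for_salt_minion "$MINION_ID" 5 /root/setup.log || fail
```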

View File

@@ -130,7 +130,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -161,9 +160,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occuring BEFORE kratos index has rolled over in 2.4.210)
fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -227,7 +224,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
fi
RESULT=0
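
Each EXCLUDED_ERRORS line extends one long alternation that so-verify later applies as a negative filter. A hedged sketch of the general pattern; the exact log search inside so-verify may differ:

```
# Assumption about how the alternation is consumed: grep the logs for error
# keywords, then drop the matches this script has explicitly excused.
if grep -riE 'error|fatal' /opt/so/log | grep -vE "$EXCLUDED_ERRORS" > /tmp/unexplained.log; then
  RESULT=1   # unexplained errors remain
fi
```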

View File

@@ -3,16 +3,29 @@
{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'OEL' %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set manager = GLOBALS.manager %}
{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set trusttheca_text = global_ca_text[0] %}
{% set ca_server = global_ca_server[0] %}
trusted_ca:
file.managed:
x509.pem_managed:
- name: /etc/pki/ca-trust/source/anchors/ca.crt
- source: salt://ca/files/ca.crt
- text: {{ trusttheca_text }}
update_ca_certs:
cmd.run:
- name: update-ca-trust
- onchanges:
- file: trusted_ca
- x509: trusted_ca
{% else %}
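
Instead of copying ca.crt through the fileserver, the reworked state reads the PEM straight out of the Salt mine, as published by mine_update_ca_crt on the manager. The same mine data can be queried by hand; a hedged sketch, where the target glob assumes the manager's minion ID begins with its hostname, as the template does:

```
# Pull the CA PEM entries the template iterates over.
salt-call mine.get 'manager*' x509.get_pem_entries
```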

View File

@@ -6,9 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# docker service requires the ca.crt
# include ssl since docker service requires the intca
include:
- ca
- ssl
dockergroup:
group.present:
@@ -20,20 +20,20 @@ dockergroup:
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 2.2.1-1~debian.12~bookworm
- docker-ce: 5:29.2.1-1~debian.12~bookworm
- docker-ce-cli: 5:29.2.1-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:29.2.1-1~debian.12~bookworm
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~debian.12~bookworm
- docker-ce-cli: 5:27.2.0-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:27.2.0-1~debian.12~bookworm
- hold: True
- update_holds: True
{% elif grains.oscodename == 'jammy' %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 2.2.1-1~ubuntu.22.04~jammy
- docker-ce: 5:29.2.1-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:29.2.1-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:29.2.1-1~ubuntu.22.04~jammy
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.22.04~jammy
- hold: True
- update_holds: True
{% else %}
@@ -51,10 +51,10 @@ dockerheldpackages:
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 2.2.1-1.el9
- docker-ce: 3:29.2.1-1.el9
- docker-ce-cli: 1:29.2.1-1.el9
- docker-ce-rootless-extras: 29.2.1-1.el9
- containerd.io: 1.7.21-3.1.el9
- docker-ce: 3:27.2.0-1.el9
- docker-ce-cli: 1:27.2.0-1.el9
- docker-ce-rootless-extras: 27.2.0-1.el9
- hold: True
- update_holds: True
{% endif %}
@@ -89,9 +89,10 @@ docker_running:
- enable: True
- watch:
- file: docker_daemon
- x509: trusttheca
- require:
- file: docker_daemon
- file: trusttheca
- x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
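
Because hold: True pins these Docker packages, routine apt/dnf updates cannot move them; reverting the pinned versions here is what actually walks the grid back to 27.2.0. A hedged check on Debian-family nodes (on OEL the hold is implemented through the versionlock plugin):

```
# List the dpkg holds created by pkg.installed with hold: True.
apt-mark showhold
```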

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- ca
- elasticagent.config
- elasticagent.sostatus
@@ -56,10 +55,8 @@ so-elastic-agent:
{% endif %}
- require:
- file: create-elastic-agent-config
- file: trusttheca
- watch:
- file: create-elastic-agent-config
- file: trusttheca
delete_so-elastic-agent_so-status.disabled:
file.uncomment:

View File

@@ -3,7 +3,7 @@
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
id: aea1ba80-1065-11ee-a369-97538913b6a9
revision: 4
revision: 1
outputs:
default:
type: elasticsearch
@@ -22,133 +22,242 @@ agent:
metrics: false
features: {}
inputs:
- id: filestream-filestream-85820eb0-25ef-11f0-a18d-1b26f69b8310
name: import-suricata-logs
revision: 3
type: filestream
- id: logfile-logs-fefef78c-422f-4cfa-8abf-4cd1b9428f62
name: import-evtx-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: filestream
name: log
version:
data_stream:
namespace: so
package_policy_id: 85820eb0-25ef-11f0-a18d-1b26f69b8310
package_policy_id: fefef78c-422f-4cfa-8abf-4cd1b9428f62
streams:
- id: filestream-filestream.generic-85820eb0-25ef-11f0-a18d-1b26f69b8310
- id: logfile-log.log-fefef78c-422f-4cfa-8abf-4cd1b9428f62
data_stream:
dataset: import
paths:
- /nsm/import/*/suricata/eve*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
ignore_older: 72h
clean_inactive: -1
parsers: null
processors:
- add_fields:
target: event
fields:
category: network
module: suricata
imported: true
- dissect:
tokenizer: /nsm/import/%{import.id}/suricata/%{import.file}
field: log.file.path
target_prefix: ''
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-86b4e960-25ef-11f0-a18d-1b26f69b8310
name: import-zeek-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 86b4e960-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-86b4e960-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: import
paths:
- /nsm/import/*/zeek/logs/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
- /nsm/import/*/evtx/*.json
processors:
- dissect:
tokenizer: /nsm/import/%{import.id}/zeek/logs/%{import.file}
field: log.file.path
tokenizer: '/nsm/import/%{import.id}/evtx/%{import.file}'
target_prefix: ''
- script:
lang: javascript
source: |
function process(event) {
var pl = event.Get("import.file").slice(0,-4);
event.Put("@metadata.pipeline", "zeek." + pl);
}
- add_fields:
target: event
fields:
category: network
module: zeek
imported: true
- add_tags:
tags: ics
when:
regexp:
import.file: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-91741240-25ef-11f0-a18d-1b26f69b8310
name: soc-sensoroni-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 91741240-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-91741240-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: soc
paths:
- /opt/so/log/sensoroni/sensoroni.log
pipeline: common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors:
- decode_json_fields:
fields:
- message
target: sensoroni
target: ''
- drop_fields:
ignore_missing: true
fields:
- host
- add_fields:
fields:
dataset: system.security
type: logs
namespace: default
target: data_stream
- add_fields:
fields:
dataset: system.security
module: system
imported: true
target: event
- then:
- add_fields:
fields:
dataset: windows.sysmon_operational
target: data_stream
- add_fields:
fields:
dataset: windows.sysmon_operational
module: windows
imported: true
target: event
if:
equals:
winlog.channel: Microsoft-Windows-Sysmon/Operational
- then:
- add_fields:
fields:
dataset: system.application
target: data_stream
- add_fields:
fields:
dataset: system.application
target: event
if:
equals:
winlog.channel: Application
- then:
- add_fields:
fields:
dataset: system.system
target: data_stream
- add_fields:
fields:
dataset: system.system
target: event
if:
equals:
winlog.channel: System
- then:
- add_fields:
fields:
dataset: windows.powershell_operational
target: data_stream
- add_fields:
fields:
dataset: windows.powershell_operational
module: windows
target: event
if:
equals:
winlog.channel: Microsoft-Windows-PowerShell/Operational
tags:
- import
- id: logfile-redis-fc98c947-7d17-4861-a318-7ad075f6d1b0
name: redis-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: redis
version:
data_stream:
namespace: default
package_policy_id: fc98c947-7d17-4861-a318-7ad075f6d1b0
streams:
- id: logfile-redis.log-fc98c947-7d17-4861-a318-7ad075f6d1b0
data_stream:
dataset: redis.log
type: logs
exclude_files:
- .gz$
paths:
- /opt/so/log/redis/redis.log
tags:
- redis-log
exclude_lines:
- '^\s+[\-`(''.|_]'
- id: logfile-logs-3b56803d-5ade-4c93-b25e-9b37182f66b8
name: import-suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 3b56803d-5ade-4c93-b25e-9b37182f66b8
streams:
- id: logfile-log.log-3b56803d-5ade-4c93-b25e-9b37182f66b8
data_stream:
dataset: import
pipeline: suricata.common
paths:
- /nsm/import/*/suricata/eve*.json
processors:
- add_fields:
fields:
module: suricata
imported: true
category: network
target: event
- dissect:
field: log.file.path
tokenizer: '/nsm/import/%{import.id}/suricata/%{import.file}'
target_prefix: ''
- id: logfile-logs-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
name: soc-server-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: c327e1a3-1ebe-449c-a8eb-f6f35032e69d
streams:
- id: logfile-log.log-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sensoroni-server.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true
max_depth: 2
add_error_key: true
- add_fields:
target: event
fields:
- message
target: soc
- add_fields:
fields:
module: soc
dataset_temp: server
category: host
target: event
- rename:
ignore_missing: true
fields:
- from: soc.fields.sourceIp
to: source.ip
- from: soc.fields.status
to: http.response.status_code
- from: soc.fields.method
to: http.request.method
- from: soc.fields.path
to: url.path
- from: soc.message
to: event.action
- from: soc.level
to: log.level
tags:
- so-soc
- id: logfile-logs-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
name: soc-sensoroni-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
streams:
- id: logfile-log.log-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/sensoroni/sensoroni.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true
max_depth: 2
fields:
- message
target: sensoroni
- add_fields:
fields:
module: soc
dataset_temp: sensoroni
category: host
target: event
- rename:
ignore_missing: true
fields:
- from: sensoroni.fields.sourceIp
to: source.ip
@@ -162,100 +271,141 @@ inputs:
to: event.action
- from: sensoroni.level
to: log.level
ignore_missing: true
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-976e3900-25ef-11f0-a18d-1b26f69b8310
name: suricata-logs
revision: 3
type: filestream
- id: logfile-logs-df0d7f2c-221f-433b-b18b-d1cf83250515
name: soc-salt-relay-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: filestream
name: log
version:
data_stream:
namespace: so
package_policy_id: 976e3900-25ef-11f0-a18d-1b26f69b8310
package_policy_id: df0d7f2c-221f-433b-b18b-d1cf83250515
streams:
- id: filestream-filestream.generic-976e3900-25ef-11f0-a18d-1b26f69b8310
- id: logfile-log.log-df0d7f2c-221f-433b-b18b-d1cf83250515
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/salt-relay.log
processors:
- dissect:
field: message
tokenizer: '%{soc.ts} | %{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: salt_relay
category: host
target: event
tags:
- so-soc
- id: logfile-logs-74bd2366-fe52-493c-bddc-843a017fc4d0
name: soc-auth-sync-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 74bd2366-fe52-493c-bddc-843a017fc4d0
streams:
- id: logfile-log.log-74bd2366-fe52-493c-bddc-843a017fc4d0
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sync.log
processors:
- dissect:
field: message
tokenizer: '%{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: auth_sync
category: host
target: event
tags:
- so-soc
- id: logfile-logs-d151d9bf-ff2a-4529-9520-c99244bc0253
name: suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: d151d9bf-ff2a-4529-9520-c99244bc0253
streams:
- id: logfile-log.log-d151d9bf-ff2a-4529-9520-c99244bc0253
data_stream:
dataset: suricata
pipeline: suricata.common
paths:
- /nsm/suricata/eve*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors:
- add_fields:
target: event
fields:
category: network
module: suricata
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-95091fe0-25ef-11f0-a18d-1b26f69b8310
category: network
target: event
- id: logfile-logs-31f94d05-ae75-40ee-b9c5-0e0356eff327
name: strelka-logs
revision: 3
type: filestream
revision: 2
type: logfile
use_output: default
meta:
package:
name: filestream
name: log
version:
data_stream:
namespace: so
package_policy_id: 95091fe0-25ef-11f0-a18d-1b26f69b8310
package_policy_id: 31f94d05-ae75-40ee-b9c5-0e0356eff327
streams:
- id: filestream-filestream.generic-95091fe0-25ef-11f0-a18d-1b26f69b8310
- id: logfile-log.log-31f94d05-ae75-40ee-b9c5-0e0356eff327
data_stream:
dataset: strelka
pipeline: strelka.file
paths:
- /nsm/strelka/log/strelka.log
pipeline: strelka.file
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors:
- add_fields:
target: event
fields:
category: file
module: strelka
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-9f309ca0-25ef-11f0-a18d-1b26f69b8310
category: file
target: event
- id: logfile-logs-6197fe84-9b58-4d9b-8464-3d517f28808d
name: zeek-logs
revision: 2
type: filestream
revision: 1
type: logfile
use_output: default
meta:
package:
name: filestream
version:
name: log
version:
data_stream:
namespace: so
package_policy_id: 9f309ca0-25ef-11f0-a18d-1b26f69b8310
package_policy_id: 6197fe84-9b58-4d9b-8464-3d517f28808d
streams:
- id: filestream-filestream.generic-9f309ca0-25ef-11f0-a18d-1b26f69b8310
- id: logfile-log.log-6197fe84-9b58-4d9b-8464-3d517f28808d
data_stream:
dataset: zeek
paths:
- /nsm/zeek/logs/current/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
processors:
- dissect:
tokenizer: /nsm/zeek/logs/current/%{pipeline}.log
tokenizer: '/nsm/zeek/logs/current/%{pipeline}.log'
field: log.file.path
trim_chars: .log
target_prefix: ''
@@ -277,17 +427,18 @@ inputs:
regexp:
pipeline: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
exclude_files:
- >-
broker|capture_loss|cluster|ecat_arp_info|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout.log$
- id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-udp-514
revision: 4
revision: 3
type: udp
use_output: default
meta:
package:
name: udp
version:
version: 1.10.0
data_stream:
namespace: so
package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60
@@ -307,13 +458,13 @@ inputs:
- syslog
- id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-tcp-514
revision: 4
revision: 3
type: tcp
use_output: default
meta:
package:
name: tcp
version:
version: 1.10.0
data_stream:
namespace: so
package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60

View File

@@ -11,7 +11,6 @@
include:
- elasticfleet.artifact_registry
- elasticfleet.ssl
# Add EA Group
elasticfleetgroup:
@@ -96,9 +95,6 @@ soresourcesrepoclone:
- rev: 'main'
- depth: 1
- force_reset: True
- retry:
attempts: 3
interval: 10
{% endif %}
elasticdefendconfdir:

View File

@@ -13,10 +13,9 @@
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
include:
- ca
- logstash.ssl
- elasticfleet.config
- elasticfleet.sostatus
- ssl
{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -134,11 +133,6 @@ so-elastic-fleet:
{% endfor %}
{% endif %}
- watch:
- file: trusttheca
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt
- require:
- file: trusttheca
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt
{% endif %}

View File

@@ -2,7 +2,7 @@
{%- raw -%}
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "import-zeek-logs",
@@ -10,31 +10,19 @@
"description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/zeek/logs/*.log"
],
"data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
}
}
}
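
Each of these integration files is a Fleet package-policy body that Security Onion submits to Kibana, so the filestream-to-log revert here changes what gets POSTed at setup time. A hedged sketch of the equivalent manual call against the standard Kibana Fleet API; credentials, host, and the saved filename are placeholders:

```
# Push a package policy like the one above to Fleet by hand.
curl -sk -u "USER:PASS" -X POST "https://localhost:5601/api/fleet/package_policies" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d @zeek-import-logs.json
```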

View File

@@ -11,51 +11,36 @@
{%- endif -%}
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"pipeline": "kratos",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"tags": ["so-kratos"],
{%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%}
"tags": [
"so-kratos"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"custom": "pipeline: kratos"
}
}
}
}
},
"force": true
}
}

View File

@@ -2,38 +2,28 @@
{%- raw -%}
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"id": "zeek-logs",
"name": "zeek-logs",
"namespace": "so",
"description": "Zeek logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/zeek/logs/current/*.log"
],
"data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
}
}
}
@@ -41,4 +31,4 @@
},
"force": true
}
{%- endraw -%}
{%- endraw -%}

View File

@@ -1,43 +1,26 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/hydra/hydra.log"
],
"data_stream.dataset": "hydra",
"pipeline": "hydra",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"tags": [
"so-hydra"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"tags": ["so-hydra"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"custom": "pipeline: hydra"
}
}
}
@@ -45,5 +28,3 @@
},
"force": true
}

View File

@@ -1,44 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "idh-logs",
"namespace": "so",
"description": "IDH integration",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/idh/opencanary.log"
],
"data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,46 +1,33 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"vars": {},
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/evtx/*.json"
],
"data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
"import"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
]
}
}
}
}
},
"force": true
}
}

View File

@@ -1,45 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/suricata/eve*.json"
],
"data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"custom": "pipeline: suricata.common"
}
}
}
}
},
"force": true
}
}

View File

@@ -15,7 +15,7 @@
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/redis/redis-server.log"
"/opt/so/log/redis/redis.log"
],
"tags": [
"redis-log"

View File

@@ -1,17 +1,18 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"vars": {},
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
@@ -19,28 +20,15 @@
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"data_stream.dataset": "rita",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"tags": [],
"recursive_glob": true,
"exclude_files": [],
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"data_stream.dataset": "rita",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,41 +1,29 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"vars": {},
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/custom-mappings/ip-descriptions.csv"
],
"data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"tags": [
"so-ip-mappings"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"custom": ""
}
}
}
@@ -43,3 +31,5 @@
},
"force": true
}

View File

@@ -1,44 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sync.log"
],
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,48 +1,35 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [
"so-soc"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,46 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/salt-relay.log"
],
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"tags": [
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,44 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/sensoroni/sensoroni.log"
],
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
"force": true
}

View File

@@ -1,46 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sensoroni-server.log"
],
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"tags": ["so-soc"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,44 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "strelka-logs",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/strelka/log/strelka.log"
],
"data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"custom": "pipeline: strelka.file"
}
}
}
}
},
"force": true
}
}

View File

@@ -1,44 +1,30 @@
{
"package": {
"name": "filestream",
"name": "log",
"version": ""
},
"name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration",
"policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": {
"filestream-filestream": {
"logs-logfile": {
"enabled": true,
"streams": {
"filestream.generic": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/suricata/eve*.json"
],
"data_stream.dataset": "filestream.generic",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"data_stream.dataset": "suricata",
"tags": [],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"custom": "pipeline: suricata.common"
}
}
}
}
},
"force": true
}
}
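
Each of these grid-node integration files is plain Fleet package-policy JSON, so one can also be loaded by hand for testing. A minimal sketch, assuming Kibana answers on localhost:5601 and that /opt/so/conf/elasticsearch/curl.config supplies working credentials (both taken from the scripts later in this diff); the file name is illustrative:

```
#!/bin/bash
# Hypothetical one-off load of a single integration file (not the shipped tooling).
INTEGRATION=/opt/so/conf/elastic-fleet/integrations/grid-nodes_general/suricata-logs.json

# Fleet's API rejects mutating requests without the kbn-xsrf header.
curl -K /opt/so/conf/elasticsearch/curl.config \
  -XPOST 'http://localhost:5601/api/fleet/package_policies' \
  -H 'kbn-xsrf: true' \
  -H 'Content-Type: application/json' \
  -d "@$INTEGRATION"
```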

View File

@@ -1,107 +0,0 @@
{
"package": {
"name": "elasticsearch",
"version": ""
},
"name": "elasticsearch-grid-nodes_heavy",
"namespace": "default",
"description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes_heavy",
"inputs": {
"elasticsearch-logfile": {
"enabled": true,
"streams": {
"elasticsearch.audit": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_audit.json"
]
}
},
"elasticsearch.deprecation": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_deprecation.json"
]
}
},
"elasticsearch.gc": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/gc.log.[0-9]*",
"/var/log/elasticsearch/gc.log"
]
}
},
"elasticsearch.server": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/elasticsearch/*.json"
]
}
},
"elasticsearch.slowlog": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_index_search_slowlog.json",
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
]
}
}
}
},
"elasticsearch-elasticsearch/metrics": {
"enabled": false,
"vars": {
"hosts": [
"http://localhost:9200"
],
"scope": "node"
},
"streams": {
"elasticsearch.stack_monitoring.ccr": {
"enabled": false
},
"elasticsearch.stack_monitoring.cluster_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.enrich": {
"enabled": false
},
"elasticsearch.stack_monitoring.index": {
"enabled": false
},
"elasticsearch.stack_monitoring.index_recovery": {
"enabled": false,
"vars": {
"active.only": true
}
},
"elasticsearch.stack_monitoring.index_summary": {
"enabled": false
},
"elasticsearch.stack_monitoring.ml_job": {
"enabled": false
},
"elasticsearch.stack_monitoring.node": {
"enabled": false
},
"elasticsearch.stack_monitoring.node_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.pending_tasks": {
"enabled": false
},
"elasticsearch.stack_monitoring.shard": {
"enabled": false
}
}
}
},
"force": true
}

View File

@@ -8,9 +8,7 @@
{% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}
{% if not AGENT_STATUS %}
pull_agent_installer:
file.managed:
@@ -21,7 +19,7 @@ pull_agent_installer:
run_installer:
cmd.run:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
- cwd: /opt/so
- retry:
attempts: 3

View File

@@ -1,186 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_agent_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetagentkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ CA.server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 947
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 947
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -17,9 +17,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json; do
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
@@ -40,7 +40,8 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
done
# Grid Nodes - General
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json; do
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json
do
printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
@@ -59,9 +60,13 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi
fi
done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Grid Nodes - Heavy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json; do
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json
do
printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
@@ -73,16 +78,22 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi
fi
done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Fleet Server - Optional integrations
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json; do
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
@@ -106,8 +117,6 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi
fi
done
# Only create the state file if all policies were created/updated successfully
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
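
The loops above all share one check-then-create/update flow via the elastic_fleet_integration_check and elastic_fleet_integration_create helpers. A rough sketch of what that flow amounts to against the Fleet API — the kuery, the saved-object prefix, and the update payload are assumptions, not the shipped helper internals:

```
#!/bin/bash
# Sketch: look up the policy by name, then update in place or create fresh.
NAME=$(jq -r '.name' "$INTEGRATION")
INTEGRATION_ID=$(curl -K /opt/so/conf/elasticsearch/curl.config \
  "http://localhost:5601/api/fleet/package_policies?kuery=ingest-package-policies.name:\"$NAME\"" \
  | jq -r '.items[0].id // empty')

if [ -n "$INTEGRATION_ID" ]; then
  # Policy exists - update it under its existing id.
  curl -K /opt/so/conf/elasticsearch/curl.config \
    -XPUT "http://localhost:5601/api/fleet/package_policies/$INTEGRATION_ID" \
    -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "@$INTEGRATION"
else
  # Policy is missing - create it from the JSON file.
  curl -K /opt/so/conf/elasticsearch/curl.config \
    -XPOST 'http://localhost:5601/api/fleet/package_policies' \
    -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "@$INTEGRATION"
fi
```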

View File

@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi
# Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")

View File

@@ -26,7 +26,7 @@ function update_es_urls() {
}
# Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null)
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
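
The --retry/--fail flags plus the jq probe form a small guard pattern around every Fleet API read in these scripts. A minimal sketch of the idea, with the expected id taken from the URL above:

```
#!/bin/bash
# Retry transient failures, turn HTTP errors into empty output, then verify
# the JSON looks sane before acting on it.
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config \
  'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' \
  --retry 3 --retry-delay 30 --fail 2>/dev/null)

CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_elasticsearch" ]; then
  echo "Fleet API returned unexpected data - bailing."
  exit 0
fi
```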

View File

@@ -142,7 +142,7 @@ function update_kafka_outputs() {
{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null)
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -168,7 +168,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null)
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -241,11 +241,9 @@ printf '%s\n'\
"" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0

View File

@@ -23,7 +23,7 @@ function update_fleet_urls() {
}
# Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null)
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -34,11 +34,6 @@ if [[ "$RETURN_CODE" != "0" ]]; then
exit 1
fi
if [[ ! -f /etc/pki/elasticfleet-kafka.crt || ! -f /etc/pki/elasticfleet-kafka.key ]]; then
echo -e "\nKafka certificates not found, can't setup Elastic Fleet output policy for Kafka...\n"
exit 1
fi
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)

View File

@@ -26,14 +26,14 @@ catrustscript:
GLOBALS: {{ GLOBALS }}
{% endif %}
elasticsearch_cacerts:
cacertz:
file.managed:
- name: /opt/so/conf/ca/cacerts
- source: salt://elasticsearch/cacerts
- user: 939
- group: 939
elasticsearch_capems:
capemz:
file.managed:
- name: /opt/so/conf/ca/tls-ca-bundle.pem
- source: salt://elasticsearch/tls-ca-bundle.pem

View File

@@ -5,6 +5,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
- elasticsearch.ca
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

View File

@@ -6,8 +6,6 @@ elasticsearch:
action:
destructive_requires_name: true
cluster:
logsdb:
enabled: false
routing:
allocation:
disk:
@@ -693,6 +691,7 @@ elasticsearch:
match_mapping_type: string
settings:
index:
final_pipeline: .fleet_final_pipeline-1
lifecycle:
name: so-import-logs
mapping:

View File

@@ -14,9 +14,6 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include:
- ca
- elasticsearch.ca
- elasticsearch.ssl
- elasticsearch.config
- elasticsearch.sostatus
@@ -64,7 +61,11 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
@@ -81,21 +82,22 @@ so-elasticsearch:
{% endfor %}
{% endif %}
- watch:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: cacertz
- file: esyml
- require:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml
- file: eslog4jfile
- file: nsmesdir
- file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms
{% if GLOBALS.is_manager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- cmd: auth_users_roles_inode
- cmd: auth_users_inode

View File

@@ -1,212 +1,31 @@
{
"version": 3,
"_meta": {
"managed_by": "securityonion",
"managed": true
},
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{
"set": {
"ignore_failure": true,
"field": "event.module",
"value": "elastic_agent"
}
},
{
"split": {
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')",
"field": "event.dataset",
"separator": "\\.",
"target_field": "module_temp"
}
},
{
"split": {
"if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')",
"field": "data_stream.dataset",
"separator": "\\.",
"target_field": "datastream_dataset_temp",
"ignore_missing": true
}
},
{
"set": {
"if": "ctx.module_temp != null",
"override": true,
"field": "event.module",
"value": "{{module_temp.0}}"
}
},
{
"set": {
"if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'",
"field": "event.module",
"value": "{{ datastream_dataset_temp.0 }}",
"ignore_failure": true,
"ignore_empty_value": true,
"description": "Fix EA network packet capture"
}
},
{
"gsub": {
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')",
"field": "event.dataset",
"pattern": "^[^.]*.",
"replacement": "",
"target_field": "dataset_tag_temp"
}
},
{
"append": {
"if": "ctx.dataset_tag_temp != null",
"field": "tags",
"value": "{{dataset_tag_temp}}",
"allow_duplicates": false
}
},
{
"set": {
"if": "ctx.network?.direction == 'egress'",
"override": true,
"field": "network.initiated",
"value": "true"
}
},
{
"set": {
"if": "ctx.network?.direction == 'ingress'",
"override": true,
"field": "network.initiated",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv4'",
"override": true,
"field": "destination.ipv6",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv6'",
"override": true,
"field": "destination.ipv6",
"value": "true"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.dataset",
"value": "import"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.namespace",
"value": "so"
}
},
{
"community_id": {
"if": "ctx.event?.dataset == 'endpoint.events.network'",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.module == 'fim'",
"override": true,
"field": "event.module",
"value": "file_integrity"
}
},
{
"rename": {
"if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'",
"ignore_missing": true,
"field": "winlog.event_data.Threat Name",
"target_field": "winlog.event_data.threat_name"
}
},
{
"set": {
"if": "ctx?.metadata?.kafka != null",
"field": "kafka.id",
"value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'",
"field": "event.module",
"value": "elasticsearch"
}
},
{
"append": {
"field": "related.ip",
"value": [
"{{source.ip}}",
"{{destination.ip}}"
],
"allow_duplicates": false,
"if": "ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null",
"ignore_failure": true
}
},
{
"foreach": {
"field": "host.ip",
"processor": {
"append": {
"field": "related.ip",
"value": "{{_ingest._value}}",
"allow_duplicates": false
}
},
"if": "ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null",
"ignore_missing": true,
"description": "Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"
}
},
{
"pipeline": {
"name": ".fleet_final_pipeline-1",
"ignore_missing_pipeline": true
}
},
{
"remove": {
"field": "event.agent_id_status",
"ignore_missing": true,
"if": "ctx?.event?.agent_id_status == 'auth_metadata_missing'"
}
},
{
"remove": {
"field": [
"message2",
"type",
"fields",
"category",
"module",
"dataset",
"event.dataset_temp",
"dataset_tag_temp",
"module_temp",
"datastream_dataset_temp"
],
"ignore_missing": true,
"ignore_failure": true
}
}
]
}
"version": 3,
"_meta": {
"managed_by": "securityonion",
"managed": true
},
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "split": { "if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
{ "set": { "if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'", "field":"event.module", "value":"{{ datastream_dataset_temp.0 }}", "ignore_failure":true, "ignore_empty_value":true, "description":"Fix EA network packet capture" } },
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}", "allow_duplicates": false } },
{ "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}
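
Processor chains like the one above can be exercised without shipping real events by replaying a test document through the ingest simulate API. A minimal sketch using just the split/set pair that derives event.module — the Elasticsearch endpoint, credentials, and sample document are assumptions:

```
curl -K /opt/so/conf/elasticsearch/curl.config \
  -XPOST 'https://localhost:9200/_ingest/pipeline/_simulate?pretty' \
  -H 'Content-Type: application/json' -d '
{
  "pipeline": {
    "processors": [
      { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains(\".\")",
                   "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
      { "set":   { "if": "ctx.module_temp != null", "override": true,
                   "field": "event.module", "value": "{{module_temp.0}}" } }
    ]
  },
  "docs": [ { "_source": { "event": { "dataset": "suricata.alert" } } } ]
}'
# Expected: the simulated doc comes back with event.module set to "suricata".
```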

View File

@@ -27,13 +27,6 @@ elasticsearch:
readonly: True
global: True
helpLink: elasticsearch.html
logsdb:
enabled:
description: Enables or disables the Elasticsearch logsdb index mode. When enabled, most logs-* datastreams will convert to logsdb from standard after rolling over.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
routing:
allocation:
disk:

View File

@@ -1,66 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
# Create a cert for elasticsearch
elasticsearch_key:
x509.private_key_managed:
- name: /etc/pki/elasticsearch.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticsearch_crt:
x509.certificate_managed:
- name: /etc/pki/elasticsearch.crt
- ca_server: {{ CA.server }}
- signing_policy: registry
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
- x509: /etc/pki/elasticsearch.key
elastickeyperms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
elasticp12perms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.p12
- mode: 640
- group: 930
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
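
With cert-issuing states like the one removed above, the resulting artifacts are easy to sanity-check in place. Two read-only probes, assuming the paths and the empty PKCS#12 export password used by the state:

```
# Inspect CN, SANs, and the validity window of the node certificate.
openssl x509 -in /etc/pki/elasticsearch.crt -noout -subject -ext subjectAltName -dates

# Confirm the PKCS#12 bundle unpacks with the empty password set at export time.
openssl pkcs12 -in /etc/pki/elasticsearch.p12 -nokeys -passin pass: | \
  openssl x509 -noout -subject
```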

View File

@@ -2,7 +2,7 @@
"template": {
"settings": {
"index": {
"final_pipeline": "global@custom"
"final_pipeline": ".fleet_final_pipeline-1"
}
},
"mappings": {

View File

@@ -14,9 +14,8 @@ set -e
# Check to see if we have extracted the ca cert.
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
# Make sure symbolic links are followed when copying from container
docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker rm so-elasticsearchca
echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -121,7 +121,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)"
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %}
pattern="*"
{% endif %}
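
The heavynode branch above leans on bash extglob negation: with extglob enabled, `!(a|b|c)` matches any name that matches none of the alternatives. A throwaway illustration (template names here are made up):

```
#!/bin/bash
shopt -s extglob
pattern="!(*aws*|*azure*|*windows*)"

for f in so-suricata-template so-aws-template so-windows-template; do
  # Only names containing none of the excluded substrings match the pattern.
  [[ $f == $pattern ]] && echo "would load: $f"
done
# Prints only: would load: so-suricata-template
```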

View File

@@ -32,7 +32,7 @@ global:
readonly: True
advanced: True
url_base:
description: The base URL for the Security Onion Console. Must be accessible by all nodes in the grid, as well as all analysts. Also used for handling of authentication cookies. Can be an IP address or a hostname/FQDN. Do not include protocol (http/https) or port number.
description: Used for handling of authentication cookies.
global: True
airgap:
description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.

View File

@@ -9,6 +9,7 @@
include:
- salt.minion
- ssl
# Influx DB
influxconfdir:

View File

@@ -11,7 +11,6 @@
{% set TOKEN = salt['pillar.get']('influxdb:token') %}
include:
- influxdb.ssl
- influxdb.config
- influxdb.sostatus
@@ -60,8 +59,6 @@ so-influxdb:
{% endif %}
- watch:
- file: influxdbconf
- x509: influxdb_key
- x509: influxdb_crt
- require:
- file: influxdbconf
- x509: influxdb_key

View File

@@ -1,55 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
influxdb_key:
x509.private_key_managed:
- name: /etc/pki/influxdb.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/influxdb.key') -%}
- prereq:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Create a cert for talking to influxdb
influxdb_crt:
x509.certificate_managed:
- name: /etc/pki/influxdb.crt
- ca_server: {{ CA.server }}
- signing_policy: influxdb
- private_key: /etc/pki/influxdb.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
influxkeyperms:
file.managed:
- replace: False
- name: /etc/pki/influxdb.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -68,8 +68,6 @@ so-kafka:
- file: kafka_server_jaas_properties
{% endif %}
- file: kafkacertz
- x509: kafka_crt
- file: kafka_pkcs12_perms
- require:
- file: kafkacertz
@@ -97,4 +95,4 @@ include:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -6,13 +6,22 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
{% set kafka_password = salt['pillar.get']('kafka:config:password') %}
include:
- ca
- ca.dirs
{% set global_ca_server = [] %}
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set ca_server = global_ca_server[0] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if GLOBALS.pipeline == "KAFKA" %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
kafka_client_key:
x509.private_key_managed:
- name: /etc/pki/kafka-client.key
@@ -30,12 +39,12 @@ kafka_client_key:
kafka_client_crt:
x509.certificate_managed:
- name: /etc/pki/kafka-client.crt
- ca_server: {{ CA.server }}
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka-client.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
@@ -58,9 +67,9 @@ kafka_client_crt_perms:
- mode: 640
- user: 960
- group: 939
{% endif %}
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
kafka_key:
x509.private_key_managed:
- name: /etc/pki/kafka.key
@@ -78,12 +87,12 @@ kafka_key:
kafka_crt:
x509.certificate_managed:
- name: /etc/pki/kafka.crt
- ca_server: {{ CA.server }}
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
@@ -94,7 +103,6 @@ kafka_crt:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
- onchanges:
- x509: /etc/pki/kafka.key
kafka_key_perms:
file.managed:
- replace: False
@@ -118,11 +126,11 @@ kafka_pkcs12_perms:
- mode: 640
- user: 960
- group: 939
{% endif %}
{% endif %}
# Standalone needs kafka-logstash for automated testing. Searchnode/managersearch need it for Logstash to consume from Kafka.
# Manager will have the cert, but it will be unused until a pipeline is created and Logstash enabled.
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
kafka_logstash_key:
x509.private_key_managed:
- name: /etc/pki/kafka-logstash.key
@@ -140,12 +148,12 @@ kafka_logstash_key:
kafka_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/kafka-logstash.crt
- ca_server: {{ CA.server }}
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka-logstash.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
@@ -181,6 +189,7 @@ kafka_logstash_pkcs12_perms:
- user: 931
- group: 939
{% endif %}
{% endif %}
{% else %}
@@ -189,4 +198,4 @@ kafka_logstash_pkcs12_perms:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}
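
The jinja block above swaps the `ca/map.jinja` lookup for a salt mine query: it scans mine data from minions matching the manager glob and takes the first manager/standalone entry as the CA server. Which host that resolves to can be checked from any minion — the glob here mirrors the jinja and may need adjusting for a given grid:

```
# List the minions whose published x509 mine data makes them ca_server candidates.
salt-call mine.get 'manager*' x509.get_pem_entries --out=json | jq '.local | keys'
```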

View File

@@ -25,10 +25,11 @@ kibana:
discardCorruptObjects: "8.18.8"
telemetry:
enabled: False
security:
showInsecureClusterWarning: False
xpack:
security:
secureCookies: true
showInsecureClusterWarning: false
reporting:
kibanaServer:
hostname: localhost

View File

@@ -10,10 +10,11 @@
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
include:
- ssl
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch
{% endif %}
{% endif %}
# Create the logstash group
logstashgroup:

View File

@@ -12,7 +12,6 @@
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %}
include:
- ca
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch.ca
{% endif %}
@@ -21,9 +20,9 @@ include:
- kafka.ca
- kafka.ssl
{% endif %}
- logstash.ssl
- logstash.config
- logstash.sostatus
- ssl
so-logstash:
docker_container.running:
@@ -66,18 +65,22 @@ so-logstash:
- /opt/so/log/logstash:/var/log/logstash:rw
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% if GLOBALS.role != 'so-fleet' %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% endif %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
@@ -97,22 +100,11 @@ so-logstash:
{% endfor %}
{% endif %}
- watch:
- file: lsetcsync
- file: trusttheca
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
- x509: etc_filebeat_crt
- file: logstash_filebeat_p8
{% endif %}
- x509: etc_elasticfleet_logstash_crt
{% endif %}
- file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
- file: ls_pipeline_{{assigned_pipeline}}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
@@ -123,20 +115,17 @@ so-logstash:
- file: kafkacertz
{% endif %}
- require:
- file: trusttheca
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
- x509: etc_filebeat_crt
- file: logstash_filebeat_p8
{% endif %}
{% endif %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- file: cacertz
- file: capemz
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
- file: kafkacertz

View File

@@ -1,287 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states or sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode'] %}
# Start -- Elastic Fleet Logstash Input Cert
etc_elasticfleet_logstash_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-logstash.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-logstash.key') -%}
- prereq:
- x509: etc_elasticfleet_logstash_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-logstash.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-logstash.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_logstash_key
eflogstashperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- group: 939
chownelasticfleetlogstashcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.crt
- mode: 640
- user: 931
- group: 939
chownelasticfleetlogstashkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Logstash Input Cert
{% endif %} # endif is for not including HeavyNodes
# Start -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Cert needed on: Managers, Receivers
etc_elasticfleetlumberjack_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-lumberjack.key
- bits: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-lumberjack.key') -%}
- prereq:
- x509: etc_elasticfleetlumberjack_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleetlumberjack_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-lumberjack.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-lumberjack.key
- CN: {{ GLOBALS.node_ip }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-lumberjack.key -topk8 -out /etc/pki/elasticfleet-lumberjack.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleetlumberjack_key
eflogstashlumberjackperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- group: 939
chownilogstashelasticfleetlumberjackp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.p8
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.crt
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Node - Logstash Lumberjack Input / Output
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
x509.private_key_managed:
- name: /etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
- prereq:
- x509: etc_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_filebeat_crt:
x509.certificate_managed:
- name: /etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
- x509: etc_filebeat_key
fbperms:
file.managed:
- replace: False
- name: /etc/pki/filebeat.key
- mode: 640
- group: 939
logstash_filebeat_p8:
file.managed:
- replace: False
- name: /etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% if grains.role not in ['so-heavynode', 'so-receiver'] %}
# Create Symlinks to the keys so I can distribute it to all the things
filebeatdir:
file.directory:
- name: /opt/so/saltstack/local/salt/filebeat/files
- makedirs: True
fbkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
- target: /etc/pki/filebeat.p8
- user: socore
- group: socore
fbcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
- target: /etc/pki/filebeat.crt
- user: socore
- group: socore
{% endif %}
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}
fbcertdir:
file.directory:
- name: /opt/so/conf/filebeat/etc/pki
- makedirs: True
conf_filebeat_key:
x509.private_key_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
- prereq:
- x509: conf_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
conf_filebeat_crt:
x509.certificate_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:"
- onchanges:
- x509: conf_filebeat_key
filebeatkeyperms:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- mode: 640
- group: 939
chownfilebeatp8:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
elastic_curl_config_distributed:
file.managed:
- name: /opt/so/saltstack/local/salt/elasticsearch/curl.config

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
kibana_curl_config_distributed:
file.managed:
- name: /opt/so/conf/kibana/curl.config
@@ -10,4 +5,4 @@ kibana_curl_config_distributed:
- template: jinja
- mode: 600
- show_changes: False
- makedirs: True
- makedirs: True

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
include:
- elasticsearch.auth
- kratos

View File

@@ -716,18 +716,6 @@ function checkMine() {
}
}
function create_ca_pillar() {
local capillar=/opt/so/saltstack/local/pillar/ca/init.sls
printf '%s\n'\
"ca:"\
" server: $MINION_ID"\
" " > $capillar
if [ $? -ne 0 ]; then
log "ERROR" "Failed to add $MINION_ID to $capillar"
return 1
fi
}
function createEVAL() {
log "INFO" "Creating EVAL configuration for minion $MINION_ID"
is_pcaplimit=true
@@ -839,6 +827,7 @@ function createHEAVYNODE() {
add_elastic_agent_to_minion || return 1
add_sensor_to_minion || return 1
add_strelka_to_minion || return 1
add_redis_to_minion || return 1
add_telegraf_to_minion || return 1
}
@@ -1024,7 +1013,6 @@ function setupMinionFiles() {
managers=("EVAL" "STANDALONE" "IMPORT" "MANAGER" "MANAGERSEARCH")
if echo "${managers[@]}" | grep -qw "$NODETYPE"; then
add_sensoroni_with_analyze_to_minion || return 1
create_ca_pillar || return 1
else
add_sensoroni_to_minion || return 1
fi

View File

@@ -93,10 +93,6 @@ check_err() {
161)
echo 'Required intermediate Elasticsearch upgrade not complete'
;;
170)
echo "Intermediate upgrade completed successfully to $next_step_so_version, but next soup to Security Onion $originally_requested_so_version could not be started automatically."
echo "Start soup again manually to continue the upgrade to Security Onion $originally_requested_so_version."
;;
*)
echo 'Unhandled error'
echo "$err_msg"
@@ -158,7 +154,7 @@ EOF
echo "Ensure you verify the ISO that you downloaded."
exit 0
else
echo "Device has been mounted! $(cat /tmp/soagupdate/SecurityOnion/VERSION)"
echo "Device has been mounted!"
fi
else
echo "Could not find Security Onion ISO content at ${ISOLOC}"
@@ -329,19 +325,6 @@ clone_to_tmp() {
fi
}
# there is a function like this in so-minion, but we cannot source it since so-minion requires args
create_ca_pillar() {
local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
local ca_pillar_file="${ca_pillar_dir}/init.sls"
echo "Updating CA pillar configuration"
mkdir -p "$ca_pillar_dir"
echo "ca: {}" > "$ca_pillar_file"
so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
chown -R socore:socore "$ca_pillar_dir"
}
disable_logstash_heavynodes() {
c=0
printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
@@ -357,22 +340,6 @@ disable_logstash_heavynodes() {
done
}
disable_redis_heavynodes() {
local c=0
printf "\nChecking for heavynodes and disabling Redis if they exist\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
c=1
echo "Disabling Redis for: $file"
so-yaml.py replace "$file" redis.enabled False
fi
done
if [[ "$c" != 0 ]]; then
FINAL_MESSAGE_QUEUE+=("Redis has been disabled on all heavynodes.")
fi
}
enable_highstate() {
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
@@ -401,6 +368,7 @@ masterlock() {
echo "base:" > $TOPFILE
echo " $MINIONID:" >> $TOPFILE
echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE
}
@@ -465,8 +433,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
[[ "$INSTALLEDVERSION" == 2.4.201 ]] && up_to_2.4.210
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.210
true
}
@@ -481,26 +448,25 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
[[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
[[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
[[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111
[[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
[[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
[[ "$POSTVERSION" == 2.4.201 ]] && post_to_2.4.210
[[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
[[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.210
true
}
@@ -684,20 +650,11 @@ post_to_2.4.200() {
POSTVERSION=2.4.200
}
post_to_2.4.201() {
echo "Nothing to apply"
POSTVERSION=2.4.201
}
post_to_2.4.210() {
echo "Rolling over Kratos index to apply new index template"
rollover_index "logs-kratos-so"
disable_redis_heavynodes
initialize_elasticsearch_indices "so-case so-casehistory so-assistant-session so-assistant-chat"
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
@@ -978,19 +935,10 @@ up_to_2.4.200() {
INSTALLEDVERSION=2.4.200
}
up_to_2.4.201() {
echo "Nothing to do for 2.4.201"
INSTALLEDVERSION=2.4.201
}
up_to_2.4.210() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
create_ca_pillar
# This state is used to deal with the breaking change introduced in 3006.17 - https://docs.saltproject.io/en/3006/topics/releases/3006.17.html
# This is the only way the state is called so we can use concurrent=True
salt-call state.apply salt.master.add_minimum_auth_version --file-root=$UPDATE_DIR/salt --local concurrent=True
INSTALLEDVERSION=2.4.210
}
@@ -1688,218 +1636,104 @@ verify_latest_update_script() {
verify_es_version_compatibility() {
local es_required_version_statefile_base="/opt/so/state/so_es_required_upgrade_version"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
local is_active_intermediate_upgrade=1
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.6"]="8.18.8 9.0.8"
["8.18.8"]="9.0.8"
)
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.6"]="8.18.8 9.0.8"
["8.18.8"]="9.0.8"
)
# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024"
)
# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024"
)
# Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
# Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
exit 160
fi
exit 160
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
# so-yaml.py failed to get the ES version from the upgrade version's elasticsearch/defaults.yaml file. They are most likely upgrading to an SO version older than 2.4.110, which predates ES version pinning, so it is safe to continue with the upgrade.
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
# so-yaml.py failed to get the ES version from the upgrade version's elasticsearch/defaults.yaml file. They are most likely upgrading to an SO version older than 2.4.110, which predates ES version pinning, so it is safe to continue with the upgrade.
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
fi
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
else
target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
if [[ -f "$es_required_version_statefile" ]]; then
# required so verification script should have already been created
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script $es_verification_script
fi
for statefile in "${es_required_version_statefile_base}"-*; do
[[ -f $statefile ]] || continue
# create script using version in statefile
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
timeout --foreground 3600 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
local es_required_version_statefile_value=$(cat "$statefile")
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
is_active_intermediate_upgrade=0
continue
fi
# use sort -V to check whether es_required_version_statefile_value is older than the current es_version.
if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
rm -f "$statefile"
continue
fi
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script "$es_verification_script"
fi
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
done
# if current soup is an intermediate upgrade we can skip the upgrade map check below
if [[ $is_active_intermediate_upgrade -eq 0 ]]; then
return 0
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
required_es_upgrade_version="${compatible_versions##* }"
fi
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
fi
es_required_version_statefile="${es_required_version_statefile_base}-${required_es_upgrade_version}"
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
# We expect to upgrade to the latest compatible minor version of ES
create_intermediate_upgrade_verification_script "$es_verification_script"
if [[ $is_airgap -eq 0 ]]; then
run_airgap_intermediate_upgrade
else
if [[ ! -z $ISOLOC ]]; then
originally_requested_iso_location="$ISOLOC"
fi
# Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
unset ISOLOC
run_network_intermediate_upgrade
fi
fi
}
run_airgap_intermediate_upgrade() {
local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
# preserve ISOLOC value, so we can try to use it post intermediate upgrade
local originally_requested_iso_location="$ISOLOC"
# make sure a fresh ISO gets mounted
unmount_update
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo -e "\nIf you have the next ISO / USB ready, enter the path now eg. /dev/sdd, /home/onion/securityonion-$next_step_so_version.iso:"
while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
# List removable devices if any are present
local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
if [[ -n "$removable_devices" ]]; then
echo "PATH SIZE TYPE MOUNTPOINTS RM"
echo "$removable_devices"
fi
read -rp "Device/ISO Path (or 'exit' to quit): " next_iso_location
if [[ "${next_iso_location,,}" == "exit" ]]; then
echo "Exiting soup. Before reattempting to upgrade to $originally_requested_so_version, please first upgrade to $next_step_so_version to ensure Elasticsearch can properly update through the required versions."
exit 160
fi
if [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; then
echo "$next_iso_location is not a valid file or block device."
next_iso_location=""
fi
done
echo "Using $next_iso_location for required intermediary upgrade."
exec bash <<EOF
ISOLOC=$next_iso_location soup -y && \
ISOLOC=$next_iso_location soup -y && \
echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e "\n##############################################################################################################################\n" && \
# automatically start the next soup if the original ISO isn't the same block device we just used
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
umount /tmp/soagupdate
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \
exit 170
fi
echo -e "\n##############################################################################################################################\n"
EOF
}
run_network_intermediate_upgrade() {
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_branch="$BRANCH"
else
local originally_requested_so_branch="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
echo -e "\n##############################################################################################################################\n"
exec bash << EOF
BRANCH=$next_step_so_version soup -y && \
BRANCH=$next_step_so_version soup -y && \
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
echo "${compatible_versions##* }" > "$es_required_version_statefile"
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
# We expect to upgrade to the latest compatible minor version of ES
create_intermediate_upgrade_verification_script $es_verification_script
if [[ $is_airgap -eq 0 ]]; then
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
echo -e "\n##############################################################################################################################\n"
exit 160
else
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_version="$BRANCH"
else
local originally_requested_so_version="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
echo -e \"\n##############################################################################################################################\n\" && \
echo -e \"Verifying Elasticsearch was successfully upgraded to ${compatible_versions##* } across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
&& timeout --foreground 3600 bash /tmp/so_intermediate_upgrade_verification.sh ${compatible_versions##* } $es_required_version_statefile && \
echo -e \"\n##############################################################################################################################\n\" \
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
fi
fi
echo -e "\n##############################################################################################################################\n" && \
if [[ -n "$originally_requested_iso_location" ]]; then
# non-airgap soup that originally used -f: run the intermediate upgrade over the network with BRANCH, then come back to the original ISO for the final soup
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
BRANCH=$originally_requested_so_branch soup -y && \
BRANCH=$originally_requested_so_branch soup -y
fi
echo -e "\n##############################################################################################################################\n"
EOF
}
create_intermediate_upgrade_verification_script() {
@@ -1936,10 +1770,10 @@ create_intermediate_upgrade_verification_script() {
local retries=20
local retry_count=0
local delay=180
local success=1
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2>&1)
local exit_status=$?
# Check that all heavynodes returned good data
@@ -1955,7 +1789,7 @@ create_intermediate_upgrade_verification_script() {
return 0
else
echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
@@ -1980,10 +1814,11 @@ create_intermediate_upgrade_verification_script() {
local retries=20
local retry_count=0
local delay=180
local success=1
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 2>&1)
local exit_status=$?
if [[ $exit_status -ne 0 ]]; then
@@ -2060,7 +1895,7 @@ apply_hotfix() {
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
@@ -2130,7 +1965,6 @@ main() {
echo "Verifying we have the latest soup script."
verify_latest_update_script
echo "Verifying Elasticsearch version compatibility before upgrading."
verify_es_version_compatibility
echo "Let's see if we need to update Security Onion."
@@ -2260,7 +2094,7 @@ main() {
echo ""
echo "Running a highstate. This could take several minutes."
set +e
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e
@@ -2273,7 +2107,7 @@ main() {
check_saltmaster_status
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
# Stop long-running scripts to allow potentially updated scripts to load on the next execution.
killall salt-relay.sh

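The compatibility logic above boils down to a lookup table: each running Elasticsearch version maps to the set of versions it may jump to directly, and any other target forces an intermediate soup through the pinned SO release. A minimal sketch of that check, reusing the same map (not the soup implementation itself):

```
# Minimal sketch of the upgrade-map check; mirrors es_upgrade_map from soup.
ES_UPGRADE_MAP = {
    "8.14.3": ["8.17.3", "8.18.4", "8.18.6", "8.18.8"],
    "8.17.3": ["8.18.4", "8.18.6", "8.18.8"],
    "8.18.4": ["8.18.6", "8.18.8", "9.0.8"],
    "8.18.6": ["8.18.8", "9.0.8"],
    "8.18.8": ["9.0.8"],
}

def upgrade_allowed(current: str, target: str) -> bool:
    # Re-running at the same version is fine; otherwise the target must be
    # listed as a supported hop from the current version.
    return current == target or target in ES_UPGRADE_MAP.get(current, [])

assert upgrade_allowed("8.18.8", "9.0.8")
assert not upgrade_allowed("8.14.3", "9.0.8")  # needs an intermediate upgrade
```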
View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Drop the correct nginx config based on role
nginxconfdir:
file.directory:

View File

@@ -8,14 +8,81 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% set ca_server = GLOBALS.minion_id %}
include:
- nginx.ssl
- nginx.config
- nginx.sostatus
{% if GLOBALS.role != 'so-fleet' %}
{% set container_config = 'so-nginx' %}
{% if grains.role not in ['so-fleet'] %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
make-rule-dir-nginx:
file.directory:
- name: /nsm/rules
@@ -25,12 +92,16 @@ make-rule-dir-nginx:
- user
- group
- show_changes: False
{% else %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% set container_config = 'so-nginx-fleet-node' %}
{% endif %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% if GLOBALS.role == 'so-fleet' %}
{% set container_config = 'so-nginx-fleet-node' %}
{% else %}
{% set container_config = 'so-nginx' %}
{% endif %}
so-nginx:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
@@ -83,27 +154,18 @@ so-nginx:
- watch:
- file: nginxconf
- file: nginxconfdir
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
- file: managerssl_key
- file: managerssl_crt
{% else %}
- x509: managerssl_key
- x509: managerssl_crt
{% endif%}
{% endif %}
- require:
- file: nginxconf
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
- file: managerssl_key
- file: managerssl_crt
{% else %}
{% else %}
- x509: managerssl_key
- x509: managerssl_crt
{% endif%}
{% endif%}
- file: navigatorconfig
{% endif %}
{% endif %}
delete_so-nginx_so-status.disabled:
file.uncomment:

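The x509.private_key_managed/x509.certificate_managed pair above has Salt's x509 modules generate the reverse-proxy key and ask the CA server to sign a certificate carrying the hostname, node IP, and URL base as SANs. A rough stand-alone analogue using the Python cryptography package (self-signed here for brevity, whereas the state signs via the CA server; all names are placeholders):

```
# Rough, self-signed analogue of the managerssl key/cert pair (assumes the
# "cryptography" package; hostname/IP values are placeholders).
import datetime, ipaddress
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=4096)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "manager.example.local")])
san = x509.SubjectAlternativeName([
    x509.DNSName("manager.example.local"),
    x509.IPAddress(ipaddress.ip_address("10.0.0.10")),
])
cert = (
    x509.CertificateBuilder()
    .subject_name(name).issuer_name(name)   # self-signed; the state uses ca_server
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.now(datetime.timezone.utc))
    .not_valid_after(datetime.datetime.now(datetime.timezone.utc)
                     + datetime.timedelta(days=820))   # mirrors days_valid: 820
    .add_extension(san, critical=False)
    .sign(key, hashes.SHA256())
)
with open("managerssl.key", "wb") as f:
    f.write(key.private_bytes(serialization.Encoding.PEM,
                              serialization.PrivateFormat.TraditionalOpenSSL,
                              serialization.NoEncryption()))
with open("managerssl.crt", "wb") as f:
    f.write(cert.public_bytes(serialization.Encoding.PEM))
```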
View File

@@ -1,87 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.role != 'so-fleet' %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ CA.server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,22 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
- makedirs: True
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -57,6 +57,12 @@ stenoconf:
PCAPMERGED: {{ PCAPMERGED }}
STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
pcaptmpdir:
file.directory:
- name: /nsm/pcaptmp

View File

@@ -10,7 +10,6 @@
include:
- pcap.ca
- pcap.config
- pcap.sostatus

View File

@@ -7,6 +7,9 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'redis/map.jinja' import REDISMERGED %}
include:
- ssl
# Redis Setup
redisconfdir:
file.directory:

View File

@@ -9,8 +9,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- ca
- redis.ssl
- redis.config
- redis.sostatus
@@ -33,7 +31,11 @@ so-redis:
- /nsm/redis/data:/data:rw
- /etc/pki/redis.crt:/certs/redis.crt:ro
- /etc/pki/redis.key:/certs/redis.key:ro
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/certs/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro
{% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }}
@@ -53,14 +55,16 @@ so-redis:
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: trusttheca
- x509: redis_crt
- x509: redis_key
- file: /opt/so/conf/redis/etc
- require:
- file: trusttheca
- file: redisconf
- x509: redis_crt
- x509: redis_key
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
delete_so-redis_so-status.disabled:
file.uncomment:

View File

@@ -1,54 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
redis_key:
x509.private_key_managed:
- name: /etc/pki/redis.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/redis.key') -%}
- prereq:
- x509: /etc/pki/redis.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
redis_crt:
x509.certificate_managed:
- name: /etc/pki/redis.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: registry
- private_key: /etc/pki/redis.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
rediskeyperms:
file.managed:
- replace: False
- name: /etc/pki/redis.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

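The deleted ssl states key their renewal on days_remaining: 7, i.e. the certificate is reissued once fewer than seven days of validity remain. A hedged sketch of how that threshold can be computed for an existing PEM certificate (again using the cryptography package; this is not Salt's actual renewal code):

```
# Hedged sketch: compute the "days_remaining" renewal input for a PEM cert.
# Assumes cryptography >= 42 for not_valid_after_utc.
import datetime
from cryptography import x509

def days_remaining(cert_path: str) -> int:
    with open(cert_path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())
    delta = cert.not_valid_after_utc - datetime.datetime.now(datetime.timezone.utc)
    return delta.days

if days_remaining("/etc/pki/redis.crt") < 7:   # mirrors days_remaining: 7
    print("certificate is due for reissue")
```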
View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- registry.ssl
- registry.config
- registry.sostatus
@@ -54,9 +53,6 @@ so-dockerregistry:
- retry:
attempts: 5
interval: 30
- watch:
- x509: registry_crt
- x509: registry_key
- require:
- file: dockerregistryconf
- x509: registry_crt

View File

@@ -1,77 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
include:
- ca
# Delete directory if it exists at the key path
registry_key_cleanup:
file.absent:
- name: /etc/pki/registry.key
- onlyif:
- test -d /etc/pki/registry.key
registry_key:
x509.private_key_managed:
- name: /etc/pki/registry.key
- keysize: 4096
- backup: True
- new: True
- require:
- file: registry_key_cleanup
{% if salt['file.file_exists']('/etc/pki/registry.key') -%}
- prereq:
- x509: /etc/pki/registry.crt
{%- endif %}
- retry:
attempts: 15
interval: 10
# Delete directory if it exists at the crt path
registry_crt_cleanup:
file.absent:
- name: /etc/pki/registry.crt
- onlyif:
- test -d /etc/pki/registry.crt
# Create a cert for the docker registry
registry_crt:
x509.certificate_managed:
- name: /etc/pki/registry.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.manager }}, IP:{{ GLOBALS.manager_ip }}
- signing_policy: registry
- private_key: /etc/pki/registry.key
- CN: {{ GLOBALS.manager }}
- days_remaining: 7
- days_valid: 820
- backup: True
- require:
- file: registry_crt_cleanup
- timeout: 30
- retry:
attempts: 15
interval: 10
regkeyperms:
file.managed:
- replace: False
- name: /etc/pki/registry.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -46,6 +46,33 @@ def start(interval=60):
mine_update(minion)
continue
# if a manager, check that the ca is in the mine and that it is correct
if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
try:
ca_crt = x509[minion]['/etc/pki/ca.crt']
log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
# since the cert is defined, make sure it is valid
import salt.modules.x509_v2 as x509_v2
if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
log.error('checkmine engine: found minion %s doesn\'t have a valid ca_crt in the mine' % (minion))
log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
mine_delete(minion, 'x509.get_pem_entries')
mine_update(minion)
continue
else:
log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
except IndexError:
log.error('checkmine engine: found minion %s doesn\'t have a ca_crt in the mine' % (minion))
mine_delete(minion, 'x509.get_pem_entries')
mine_update(minion)
continue
except KeyError:
log.error('checkmine engine: found minion %s is not in the mine' % (minion))
mine_flush(minion)
mine_update(minion)
continue
# Update the mine if the ip in the mine doesn't match returned from manage.alived
network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
try:

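The new checkmine block follows the same repair loop the engine already uses for IP addresses: read the mined value, validate it, and flush/refresh the mine entry when it is absent or invalid. Schematically (all helper names here are hypothetical):

```
# Schematic of the mine-repair pattern above (all helpers are hypothetical):
# fetch a mined value, validate it, and force a refresh when it is missing
# or invalid so the next engine cycle sees fresh data.
def ensure_valid_mine_entry(minion, fetch, validate, mine_delete, mine_update):
    try:
        value = fetch(minion)          # e.g. x509.get_pem_entries via mine.get
    except KeyError:
        mine_update(minion)            # minion absent from the mine; repopulate
        return False
    if not validate(value):
        mine_delete(minion)            # stale or invalid entry; drop and refresh
        mine_update(minion)
        return False
    return True
```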
View File

@@ -1,73 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# -*- coding: utf-8 -*-
import logging
import os
import time
from datetime import datetime, timedelta
import salt.client
log = logging.getLogger(__name__)
TIMESTAMP_FILE = '/opt/so/state/mav_engine_start_time'
def _get_start_time():
"""Read persisted start time from file, or create one if it doesn't exist."""
if os.path.exists(TIMESTAMP_FILE):
with open(TIMESTAMP_FILE, 'r') as f:
timestamp = f.read().strip()
start_time = datetime.fromisoformat(timestamp)
log.info("Loaded existing start time from %s: %s", TIMESTAMP_FILE, start_time)
return start_time
start_time = datetime.now()
with open(TIMESTAMP_FILE, 'w') as f:
f.write(start_time.isoformat())
log.info("No existing start time found. Persisted new start time: %s", start_time)
return start_time
def _clear_start_time():
"""Remove the persisted timestamp file after successful completion."""
if os.path.exists(TIMESTAMP_FILE):
os.remove(TIMESTAMP_FILE)
log.info("Removed timestamp file %s", TIMESTAMP_FILE)
def start(wait_days=7):
"""
This engine waits for the specified number of days, then changes minimum_auth_version.
Args:
wait_days: Days to wait before taking action (default: 7)
"""
log.info(
"Starting minimum_auth_version engine - Wait time: %d days",
wait_days
)
start_time = _get_start_time()
wait_delta = timedelta(days=wait_days)
mav_removed = False
caller = salt.client.Caller()
while True:
if not mav_removed:
elapsed = datetime.now() - start_time
if elapsed >= wait_delta:
log.info("Changing minimum_auth_version")
_clear_start_time()
result = caller.cmd('state.apply', 'salt.master.remove_minimum_auth_version', queue=True)
# We shouldn't reach this line since the above line should remove the engine and restart salt-master
log.info("State apply result: %s", result)
mav_removed = True
else:
target_time = start_time + wait_delta
log.info("minimum_auth_version will be changed within an hour of %s", target_time.strftime('%m-%d-%Y %H:%M'))
time.sleep(3600) # Check hourly

View File

@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
master:
version: '3006.19'
version: '3006.16'

View File

@@ -1,23 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# This state is to be used during soup preupgrade_changes, and run when the salt-master has been stopped. Soup will later start the salt-master.
# This state is used to deal with the breaking change introduced in 3006.17 - https://docs.saltproject.io/en/3006/topics/releases/3006.17.html
set_minimum_auth_version_0:
file.managed:
- name: /etc/salt/master.d/minimum_auth_version.conf
- source: salt://salt/master/files/minimum_auth_version.conf
add_minimum_auth_version_engine_config:
file.managed:
- name: /etc/salt/master.d/minimum_auth_version_engine.conf
- source: salt://salt/master/files/minimum_auth_version_engine.conf
add_minimum_auth_version_engine:
file.managed:
- name: /etc/salt/engines/minimum_auth_version.py
- source: salt://salt/engines/master/minimum_auth_version.py

View File

@@ -1 +0,0 @@
minimum_auth_version: 0

View File

@@ -1,3 +0,0 @@
engines:
- minimum_auth_version:
wait_days: 7

View File

@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
include:
- salt.master
unset_minimum_auth_version_0:
file.absent:
- name: /etc/salt/master.d/minimum_auth_version.conf
remove_minimum_auth_version_engine_config:
file.absent:
- name: /etc/salt/master.d/minimum_auth_version_engine.conf
remove_minimum_auth_version_engine:
file.absent:
- name: /etc/salt/engines/minimum_auth_version.py
- watch_in:
- service: salt_master_service

View File

@@ -18,6 +18,10 @@ mine_functions:
mine_functions:
network.ip_addrs:
- interface: {{ interface }}
{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif %}
mine_update_mine_functions:
module.run:

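With x509.get_pem_entries added to mine_functions on manager-role minions, other nodes and runners can pull the CA certificate out of the mine. A hedged example using Salt's LocalClient (mine.get is a real Salt execution module; the minion glob and return handling are illustrative):

```
# Hedged sketch: read the mined CA cert published by manager-role minions.
import salt.client

local = salt.client.LocalClient()
# Ask manager minions to query the mine for their own x509 entries.
out = local.cmd("*_manager", "mine.get", ["*_manager", "x509.get_pem_entries"])
for queried, mine_data in out.items():
    for publisher, pems in (mine_data or {}).items():
        ca = pems.get("/etc/pki/ca.crt")
        print(publisher, "published CA cert" if ca else "no CA cert in mine")
```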
View File

@@ -1,5 +1,5 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
minion:
version: '3006.19'
version: '3006.16'
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default

View File

@@ -17,8 +17,8 @@ include:
- repo.client
- salt.mine_functions
- salt.minion.service_file
{% if GLOBALS.is_manager %}
- ca.signing_policy
{% if GLOBALS.role in GLOBALS.manager_roles %}
- ca
{% endif %}
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -111,7 +111,7 @@ salt_minion_service:
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels
{% endif %}
{% if GLOBALS.is_manager %}
- file: signing_policy
{% if GLOBALS.role in GLOBALS.manager_roles %}
- file: /etc/salt/minion.d/signing_policies.conf
{% endif %}
- order: last

View File

@@ -26,7 +26,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2026.01.22"
__ScriptVersion="2025.09.03"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@@ -369,7 +369,7 @@ __usage() {
also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-d Disables checking if Salt services are enabled to start on system boot.
You can also do this by touching ${_TMP_DIR}/disable_salt_checks on the target
You can also do this by touching ${BS_TMP_DIR}/disable_salt_checks on the target
host. Default: \${BS_FALSE}
-D Show debug output
-f Force shallow cloning for git installations.
@@ -2819,25 +2819,14 @@ __install_salt_from_repo() {
${_pip_cmd} install --force-reinstall --break-system-packages "${_arch_dep}"
fi
_PIP_VERSION_STRING=$(${_pip_cmd} --version)
echodebug "Installed pip version: $_PIP_VERSION_STRING"
_PIP_MAJOR_VERSION=$(echo "$_PIP_VERSION_STRING" | sed -E 's/^pip ([0-9]+)\..*/\1/')
echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl'"
# The following branching can be removed once we no longer support distros that still ship with
# versions of `pip` earlier than v22.1 such as Debian 11
if [ "$_PIP_MAJOR_VERSION" -lt 23 ]; then
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
${_PIP_INSTALL_ARGS} \
--global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
${_TMP_DIR}/git/deps/salt*.whl || return 1
else
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --config-settings=--global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
${_PIP_INSTALL_ARGS} \
--config-settings="--global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
${_TMP_DIR}/git/deps/salt*.whl || return 1
fi
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
${_PIP_INSTALL_ARGS} \
--global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
${_TMP_DIR}/git/deps/salt*.whl || return 1
echoinfo "Checking if Salt can be imported using ${_py_exe}"
CHECK_SALT_SCRIPT=$(cat << EOM
@@ -6107,14 +6096,7 @@ install_arch_linux_git_deps() {
}
install_arch_linux_onedir_deps() {
echodebug "install_arch_linux_onedir_deps() entry"
# Basic tooling for download/verify/extract
pacman -Sy --noconfirm --needed wget tar gzip gnupg ca-certificates || return 1
# Reuse stable deps for python-yaml etc. if you want config_salt() parity
install_arch_linux_stable_deps || return 1
return 0
}
install_arch_linux_stable() {
@@ -6129,73 +6111,7 @@ install_arch_linux_stable() {
pacman -S --noconfirm --needed bash || return 1
pacman -Su --noconfirm || return 1
# We can now resume regular salt update
# Except that this hasn't been in arch repos for years;
# so we have to build from AUR
# We use "buildgirl" because Eve demanded it.
build_user=${build_user:-buildgirl}
userdel "$build_user" || true
useradd -M -r -s /usr/bin/nologin "$build_user"
echo "$build_user ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/"$build_user"
rm -rf /tmp/yay-bin || true
git clone https://aur.archlinux.org/salt.git /tmp/yay-bin
chown -R "$build_user":"$build_user" /tmp/yay-bin
sudo -u "$build_user" env -i \
HOME=/tmp \
PATH=/usr/bin:/bin:/usr/sbin:/sbin \
MAKEFLAGS="-j$(nproc)" \
LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 \
makepkg -CcsiD /tmp/yay-bin \
--noconfirm --needed \
--noprogressbar || return 1
rm -f /etc/sudoers.d/"$build_user"
rm -rf /tmp/yay-bin
userdel "$build_user"
return 0
}
install_arch_linux_onedir() {
echodebug "install_arch_linux_onedir() entry"
version="${ONEDIR_REV:-latest}"
arch="x86_64"
[ "$(uname -m)" = "aarch64" ] && arch="aarch64"
# Resolve "latest" to actual version
if [ "$version" = "latest" ]; then
version=$(wget -qO- https://api.github.com/repos/saltstack/salt/releases/latest \
| grep -Eo '"tag_name": *"v[0-9.]+"' \
| sed 's/"tag_name": *"v//;s/"//') || return 1
fi
tarball="salt-${version}-onedir-linux-${arch}.tar.xz"
url="https://github.com/saltstack/salt/releases/download/v${version}/${tarball}"
extractdir="/tmp/salt-${version}-onedir-linux-${arch}"
echoinfo "Downloading Salt onedir: $url"
wget -q "$url" -O "/tmp/${tarball}" || return 1
# Validate tarball
if ! tar -tf "/tmp/${tarball}" >/dev/null 2>&1; then
echoerror "Invalid or corrupt onedir tarball"
return 1
fi
# Prepare extraction
rm -rf "$extractdir" || true
rm -rf /opt/saltstack/salt || true
mkdir -p "$extractdir"
# Extract and flatten (remove leading 'salt/' directory)
# /tmp/salt-${version}-onedir-linux-${arch}
tar --strip-components=1 -xf "/tmp/${tarball}" -C "$extractdir"
# Place into /opt
mkdir -p /opt/saltstack/salt
mv "$extractdir"/* /opt/saltstack/salt/ || return 1
chmod -R 755 /opt/saltstack/salt
pacman -Syu --noconfirm salt || return 1
return 0
}
@@ -6333,48 +6249,17 @@ install_arch_check_services() {
return 0
}
install_arch_linux_onedir() {
install_arch_linux_stable || return 1
install_arch_linux_onedir_post() {
echodebug "install_arch_linux_onedir_post() entry"
# Disable any distro/AUR salt units
systemctl disable --now salt-minion.service 2>/dev/null || true
systemctl disable --now salt-master.service 2>/dev/null || true
# Drop a clean unit, same pattern as Debian/Ubuntu onedir
cat >/etc/systemd/system/salt-minion.service <<'EOF'
[Unit]
Description=Salt Minion (onedir)
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/opt/saltstack/salt/salt-minion -c /etc/salt
Restart=always
LimitNOFILE=100000
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Add onedir paths system-wide
cat >/etc/profile.d/saltstack.sh <<'EOF'
export PATH=/opt/saltstack/salt:/opt/saltstack/salt/bin:$PATH
EOF
chmod 644 /etc/profile.d/saltstack.sh
if [ "$_START_DAEMONS" -eq $BS_TRUE ]; then
systemctl enable --now salt-minion.service
fi
return 0
return 0
}
install_arch_linux_onedir_post() {
install_arch_linux_post || return 1
return 0
}
#
# Ended Arch Install Functions
#

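The removed install_arch_linux_onedir flow above resolved "latest" to a concrete release via the GitHub API, then downloaded and validated the onedir tarball before extracting it. A compact Python sketch of the same resolve/download/verify sequence (stdlib only; paths are illustrative):

```
# Hedged sketch of the removed onedir flow: resolve "latest" to a concrete
# Salt version via the GitHub API, then download and sanity-check the tarball.
import json, tarfile, urllib.request

def latest_salt_version() -> str:
    url = "https://api.github.com/repos/saltstack/salt/releases/latest"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)["tag_name"].lstrip("v")

def fetch_onedir(version: str, arch: str = "x86_64") -> str:
    tarball = f"salt-{version}-onedir-linux-{arch}.tar.xz"
    src = f"https://github.com/saltstack/salt/releases/download/v{version}/{tarball}"
    dest = f"/tmp/{tarball}"
    urllib.request.urlretrieve(src, dest)
    if not tarfile.is_tarfile(dest):  # mirrors the script's "tar -tf" validation
        raise RuntimeError("invalid or corrupt onedir tarball")
    return dest
```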
Some files were not shown because too many files have changed in this diff Show More