Compare commits


1 Commit

Author SHA1 Message Date
Jorge Reyes 4630eaef43 Update version to 2.4.0-foxtrot 2025-12-16 16:05:24 -06:00
111 changed files with 1387 additions and 2308 deletions

View File

@@ -33,7 +33,6 @@ body:
- 2.4.180 - 2.4.180
- 2.4.190 - 2.4.190
- 2.4.200 - 2.4.200
- 2.4.201
- 2.4.210 - 2.4.210
- Other (please provide detail below) - Other (please provide detail below)
validations: validations:

View File

@@ -1,17 +1,17 @@
### 2.4.201-20260114 ISO image released on 2026/1/15 ### 2.4.200-20251216 ISO image released on 2025/12/16
### Download and Verify ### Download and Verify
2.4.201-20260114 ISO image: 2.4.200-20251216 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
MD5: 20E926E433203798512EF46E590C89B9 MD5: 07B38499952D1F2FD7B5AF10096D0043
SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA SHA1: 7F3A26839CA3CAEC2D90BB73D229D55E04C7D370
SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40 SHA256: 8D3AC735873A2EA8527E16A6A08C34BD5018CBC0925AC4096E15A0C99F591D5F
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso gpg --verify securityonion-2.4.200-20251216.iso.sig securityonion-2.4.200-20251216.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013 gpg: Signature made Mon 15 Dec 2025 05:24:11 PM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.
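The checksums listed above can also be checked directly against the downloaded image. Note that sha256sum prints lowercase hex, so compare case-insensitively (filename taken from the new column of this diff):
```
sha256sum securityonion-2.4.200-20251216.iso
# expected: 8d3ac735873a2ea8527e16a6a08c34bd5018cbc0925ac4096e15a0c99f591d5f
```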

View File

@@ -1 +1 @@
2.4.210 2.4.0-foxtrot

View File

@@ -1,2 +0,0 @@
ca:
server:

View File

@@ -1,6 +1,5 @@
base: base:
'*': '*':
- ca
- global.soc_global - global.soc_global
- global.adv_global - global.adv_global
- docker.soc_docker - docker.soc_docker

View File

@@ -15,7 +15,11 @@
'salt.minion-check', 'salt.minion-check',
'sensoroni', 'sensoroni',
'salt.lasthighstate', 'salt.lasthighstate',
'salt.minion', 'salt.minion'
] %}
{% set ssl_states = [
'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'schedule', 'schedule',
@@ -24,7 +28,7 @@
{% set manager_states = [ {% set manager_states = [
'salt.master', 'salt.master',
'ca.server', 'ca',
'registry', 'registry',
'manager', 'manager',
'nginx', 'nginx',
@@ -71,24 +75,28 @@
{# Map role-specific states #} {# Map role-specific states #}
{% set role_states = { {% set role_states = {
'so-eval': ( 'so-eval': (
ssl_states +
manager_states + manager_states +
sensor_states + sensor_states +
elastic_stack_states | reject('equalto', 'logstash') | list + elastic_stack_states | reject('equalto', 'logstash') | list
['logstash.ssl']
), ),
'so-heavynode': ( 'so-heavynode': (
ssl_states +
sensor_states + sensor_states +
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx'] ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
), ),
'so-idh': ( 'so-idh': (
ssl_states +
['idh'] ['idh']
), ),
'so-import': ( 'so-import': (
ssl_states +
manager_states + manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list + sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager'] ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
), ),
'so-manager': ( 'so-manager': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -96,6 +104,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managerhype': ( 'so-managerhype': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] + ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
stig_states + stig_states +
@@ -103,6 +112,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managersearch': ( 'so-managersearch': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -110,10 +120,12 @@
elastic_stack_states elastic_stack_states
), ),
'so-searchnode': ( 'so-searchnode': (
ssl_states +
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] + ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
stig_states stig_states
), ),
'so-standalone': ( 'so-standalone': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
sensor_states + sensor_states +
@@ -122,24 +134,29 @@
elastic_stack_states elastic_stack_states
), ),
'so-sensor': ( 'so-sensor': (
ssl_states +
sensor_states + sensor_states +
['nginx'] + ['nginx'] +
stig_states stig_states
), ),
'so-fleet': ( 'so-fleet': (
ssl_states +
stig_states + stig_states +
['logstash', 'nginx', 'healthcheck', 'elasticfleet'] ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
), ),
'so-receiver': ( 'so-receiver': (
ssl_states +
kafka_states + kafka_states +
stig_states + stig_states +
['logstash', 'redis'] ['logstash', 'redis']
), ),
'so-hypervisor': ( 'so-hypervisor': (
ssl_states +
stig_states + stig_states +
['hypervisor', 'libvirt'] ['hypervisor', 'libvirt']
), ),
'so-desktop': ( 'so-desktop': (
['ssl', 'docker_clean', 'telegraf'] +
stig_states stig_states
) )
} %} } %}
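The net effect of this hunk: 'ssl' and the states following it move out of the shared sensor list into a dedicated ssl_states list that is prepended to nearly every role. Because allowed_states gates direct state runs, the mapping can be sanity-checked on any node with a dry run (a sketch; the outcome depends on the node's role):
```
# Succeeds on roles whose role_states include ssl_states; on any other
# role the {{sls}}_state_not_allowed guard fails the run.
sudo salt-call state.apply ssl test=True
```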

salt/ca/dirs.sls Normal file
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
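The new ca.dirs state only pre-creates the directory that issued certificates are copied into (ca/init.sls below requires it), so it is safe to dry-run on its own:
```
sudo salt-call state.apply ca.dirs test=True
```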

View File

@@ -3,10 +3,70 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
{% if GLOBALS.is_manager %} - ca.dirs
- ca.server
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 0
- backup: True
- replace: False
- require:
- sls: ca.dirs
- timeout: 30
- retry:
attempts: 5
interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %} {% endif %}
- ca.trustca
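Once the consolidated ca state has run on the manager, the managed key and certificate can be inspected with standard OpenSSL tooling; a quick check using the paths from the state above:
```
openssl x509 -in /etc/pki/ca.crt -noout -subject -dates  # CN should be the manager
sudo openssl rsa -in /etc/pki/ca.key -check -noout       # key is mode 640, hence sudo
```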

View File

@@ -1,3 +0,0 @@
{% set CA = {
'server': pillar.ca.server
}%}

View File

@@ -1,35 +1,7 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one pki_private_key:
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
{% if setup_running %}
include:
- ssl.remove
remove_pki_private_key:
file.absent: file.absent:
- name: /etc/pki/ca.key - name: /etc/pki/ca.key
remove_pki_public_ca_crt: pki_public_ca_crt:
file.absent: file.absent:
- name: /etc/pki/ca.crt - name: /etc/pki/ca.crt
remove_trusttheca:
file.absent:
- name: /etc/pki/tls/certs/intca.crt
remove_pki_public_ca_crt_symlink:
file.absent:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
{% else %}
so-setup_not_running:
test.show_notification:
- text: "This state is reserved for usage during so-setup."
{% endif %}
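The gate keys off a live so-setup process, so outside of setup this state only emits the notification above. A sketch of exercising it:
```
pgrep -x so-setup >/dev/null || echo "so-setup not running; ca.remove will no-op"
sudo salt-call state.apply ca.remove test=True
```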

View File

@@ -1,63 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 7
- backup: True
- replace: False
- timeout: 30
- retry:
attempts: 5
interval: 30
pki_public_ca_crt_symlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
- target: /etc/pki/ca.crt
- require:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,15 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# when the salt-minion signs the cert, a copy is stored here
issued_certs_copypath:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
signing_policy:
file.managed:
- name: /etc/salt/minion.d/signing_policies.conf
- source: salt://ca/files/signing_policies.conf

View File

@@ -1,26 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- docker
# Trust the CA
trusttheca:
file.managed:
- name: /etc/pki/tls/certs/intca.crt
- source: salt://ca/files/ca.crt
- watch_in:
- service: docker_running
- show_changes: False
- makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}

View File

@@ -177,7 +177,7 @@ so-status_script:
- source: salt://common/tools/sbin/so-status - source: salt://common/tools/sbin/so-status
- mode: 755 - mode: 755
{% if GLOBALS.is_sensor %} {% if GLOBALS.role in GLOBALS.sensor_roles %}
# Add sensor cleanup # Add sensor cleanup
so-sensor-clean: so-sensor-clean:
cron.present: cron.present:

View File

@@ -554,39 +554,21 @@ run_check_net_err() {
} }
wait_for_salt_minion() { wait_for_salt_minion() {
local minion="$1" local minion="$1"
local max_wait="${2:-30}" local timeout="${2:-5}"
local interval="${3:-2}" local logfile="${3:-'/dev/stdout'}"
local logfile="${4:-'/dev/stdout'}" retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local elapsed=0 local attempt=0
# each attempt would take about 15 seconds
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..." local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
while [ $elapsed -lt $max_wait ]; do attempt=$((attempt+1))
# Check if service is running if [[ $attempt -eq $maxAttempts ]]; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running" return 1
if ! systemctl is-active --quiet salt-minion; then fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)" sleep 10
sleep $interval done
elapsed=$((elapsed + interval)) return 0
continue
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
# Check if minion responds to ping
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
return 0
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
sleep $interval
elapsed=$((elapsed + interval))
done
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
return 1
} }
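The rewritten wait_for_salt_minion leans on two helpers that are outside this hunk: retry and check_salt_minion_status, both presumably defined elsewhere in so-common. A minimal sketch of a compatible retry, with the arity (attempts, delay, command) inferred from the call site above:
```
# Hypothetical helper, shape inferred from: retry 60 5 "journalctl ..."
retry() {
  local attempts="$1" delay="$2" cmd="$3"
  local i
  for ((i = 1; i <= attempts; i++)); do
    eval "$cmd" && return 0
    sleep "$delay"
  done
  return 1
}
```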
salt_minion_count() { salt_minion_count() {

View File

@@ -129,8 +129,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -161,7 +159,6 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
fi fi
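For context, EXCLUDED_ERRORS is built up as a regex alternation, so downstream it is presumably applied as an inverted extended-regex match; a hedged sketch of that usage (the actual grep invocation and log paths are outside this hunk):
```
# Illustrative only: paths and the initial error match are assumptions.
grep -iE 'error|fail' /opt/so/log/*/*.log | grep -vE "$EXCLUDED_ERRORS"
```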

View File

@@ -3,16 +3,29 @@
{# we only want this state to run if it is CentOS #} {# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'OEL' %} {% if GLOBALS.os == 'OEL' %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set manager = GLOBALS.manager %}
{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set trusttheca_text = global_ca_text[0] %}
{% set ca_server = global_ca_server[0] %}
trusted_ca: trusted_ca:
file.managed: x509.pem_managed:
- name: /etc/pki/ca-trust/source/anchors/ca.crt - name: /etc/pki/ca-trust/source/anchors/ca.crt
- source: salt://ca/files/ca.crt - text: {{ trusttheca_text }}
update_ca_certs: update_ca_certs:
cmd.run: cmd.run:
- name: update-ca-trust - name: update-ca-trust
- onchanges: - onchanges:
- file: trusted_ca - x509: trusted_ca
{% else %} {% else %}
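The rewritten state pulls the CA PEM from the Salt mine instead of a file served from salt://ca/files. The mine data the Jinja loop iterates over can be inspected from a minion (a sketch; the target glob must match the manager's minion ID):
```
sudo salt-call mine.get '<manager-minion-id>*' x509.get_pem_entries
```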

View File

@@ -6,9 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# docker service requires the ca.crt # include ssl since docker service requires the intca
include: include:
- ca - ssl
dockergroup: dockergroup:
group.present: group.present:
@@ -89,9 +89,10 @@ docker_running:
- enable: True - enable: True
- watch: - watch:
- file: docker_daemon - file: docker_daemon
- x509: trusttheca
- require: - require:
- file: docker_daemon - file: docker_daemon
- file: trusttheca - x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present # Reserve OS ports for Docker proxy in case boot settings are not already applied/present

View File

@@ -60,7 +60,7 @@ so-elastalert:
- watch: - watch:
- file: elastaconf - file: elastaconf
- onlyif: - onlyif:
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #} - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
delete_so-elastalert_so-status.disabled: delete_so-elastalert_so-status.disabled:
file.uncomment: file.uncomment:
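The onlyif gate above now ties ElastAlert startup to Elasticsearch 8.x rather than 9.x. The same query can be run by hand to confirm which major version a grid is on:
```
sudo so-elasticsearch-query / | jq -r '.version.number'
```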

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ca
- elasticagent.config - elasticagent.config
- elasticagent.sostatus - elasticagent.sostatus
@@ -56,10 +55,8 @@ so-elastic-agent:
{% endif %} {% endif %}
- require: - require:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
- watch: - watch:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
delete_so-elastic-agent_so-status.disabled: delete_so-elastic-agent_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -95,9 +95,6 @@ soresourcesrepoclone:
- rev: 'main' - rev: 'main'
- depth: 1 - depth: 1
- force_reset: True - force_reset: True
- retry:
attempts: 3
interval: 10
{% endif %} {% endif %}
elasticdefendconfdir: elasticdefendconfdir:

View File

@@ -13,11 +13,9 @@
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %} {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
include: include:
- ca
- logstash.ssl
- elasticfleet.ssl
- elasticfleet.config - elasticfleet.config
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl
{% if grains.role not in ['so-fleet'] %} {% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -135,11 +133,6 @@ so-elastic-fleet:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt
- require:
- file: trusttheca
- x509: etc_elasticfleet_key - x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt - x509: etc_elasticfleet_crt
{% endif %} {% endif %}

View File

@@ -2,7 +2,7 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-zeek-logs", "name": "import-zeek-logs",
@@ -10,31 +10,19 @@
"description": "Zeek Import logs", "description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/zeek/logs/*.log" "/nsm/import/*/zeek/logs/*.log"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
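This file, like the integration rewrites that follow, moves the input from the filestream package back to the legacy log package. These JSON documents are Fleet package-policy bodies; a hedged sketch of pushing one to Kibana's Fleet API (endpoint, credentials, and filename assumed, not taken from this repo):
```
curl -sk -u "elastic:$ELASTIC_PASSWORD" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -X POST 'https://localhost:5601/api/fleet/package_policies' \
  -d @import-zeek-logs.json
```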

View File

@@ -11,47 +11,31 @@
{%- endif -%} {%- endif -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/kratos/kratos.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "kratos", "data_stream.dataset": "kratos",
"pipeline": "kratos", "tags": ["so-kratos"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
{%- if valid_identities -%} {%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%} {%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%} {%- endif -%}
"tags": [ "custom": "pipeline: kratos"
"so-kratos"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -59,3 +43,4 @@
}, },
"force": true "force": true
} }

View File

@@ -2,38 +2,28 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"id": "zeek-logs",
"name": "zeek-logs", "name": "zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/zeek/logs/current/*.log" "/nsm/zeek/logs/current/*.log"
], ],
"data_stream.dataset": "zeek", "data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "9.0.2", "version": "8.18.1",
"requires_root": true "requires_root": true
}, },
"enabled": true, "enabled": true,

View File

@@ -1,43 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs", "description": "Hydra logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/hydra/hydra.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "hydra",
"pipeline": "hydra", "tags": ["so-hydra"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"exclude_files": [ "custom": "pipeline: hydra"
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"tags": [
"so-hydra"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -45,5 +28,3 @@
}, },
"force": true "force": true
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "idh-logs", "name": "idh-logs",
"namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/idh/opencanary.log" "/nsm/idh/opencanary.log"
], ],
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,42 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/evtx/*.json" "/nsm/import/*/evtx/*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "custom": "",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - 
add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
], ]
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,41 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/suricata/eve*.json" "/nsm/import/*/suricata/eve*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"ignore_older": "72h", "custom": "pipeline: suricata.common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,17 +1,18 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "rita-logs", "name": "rita-logs",
"namespace": "so",
"description": "RITA Logs", "description": "RITA Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
@@ -19,28 +20,15 @@
"/nsm/rita/exploded-dns.csv", "/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv" "/nsm/rita/long-connections.csv"
], ],
"data_stream.dataset": "rita", "exclude_files": [],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h", "ignore_older": "72h",
"clean_inactive": -1, "data_stream.dataset": "rita",
"harvester_limit": 0, "tags": [],
"fingerprint": false, "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"fingerprint_offset": 0, "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, }
"force": true
} }

View File

@@ -1,41 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "so-ip-mappings", "name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings", "description": "IP Description mappings",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/custom-mappings/ip-descriptions.csv" "/nsm/custom-mappings/ip-descriptions.csv"
], ],
"data_stream.dataset": "hostnamemappings", "data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"tags": [ "tags": [
"so-ip-mappings" "so-ip-mappings"
], ],
"recursive_glob": true, "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"clean_inactive": -1, "custom": ""
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -43,3 +31,5 @@
}, },
"force": true "force": true
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sync.log" "/opt/so/log/soc/sync.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync", "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"tags": [], "custom": "pipeline: common"
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,44 +1,31 @@
{ {
"policy_id": "so-grid-nodes_general",
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-detections-logs", "name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs", "description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log" "/opt/so/log/soc/detections_runtime-status_yara.log"
], ],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "tags": [
"so-soc" "so-soc"
], ],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"ignore_older": "72h", "custom": "pipeline: common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/salt-relay.log" "/opt/so/log/soc/salt-relay.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay", "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/sensoroni/sensoroni.log" "/opt/so/log/sensoroni/sensoroni.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sensoroni-server.log" "/opt/so/log/soc/sensoroni-server.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "strelka-logs", "name": "strelka-logs",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/strelka/log/strelka.log" "/nsm/strelka/log/strelka.log"
], ],
"data_stream.dataset": "strelka", "data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"clean_inactive": -1, "custom": "pipeline: strelka.file"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/suricata/eve*.json" "/nsm/suricata/eve*.json"
], ],
"data_stream.dataset": "filestream.generic", "data_stream.dataset": "suricata",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"clean_inactive": -1, "custom": "pipeline: suricata.common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
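Note: the package-policy diffs above all make the same change — one side uses the `filestream` package with top-level `pipeline`, `parsers`, and fingerprint vars, the other the legacy `log` package that carries the ingest pipeline in a `custom: "pipeline: ..."` var. A read-only sketch for confirming which package a grid actually ended up with (standard Kibana Fleet endpoint; the curl.config usage matches the scripts later in this diff):

```
# Sketch: list each *-logs package policy and its package via the Fleet API.
curl -sK /opt/so/conf/elasticsearch/curl.config -L \
  'http://localhost:5601/api/fleet/package_policies?perPage=100' |
  jq -r '.items[] | select(.name | endswith("-logs")) | "\(.name): \(.package.name)"'
```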

View File

@@ -8,9 +8,7 @@
{% endif %} {% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %} {% if not AGENT_STATUS %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}
pull_agent_installer: pull_agent_installer:
file.managed: file.managed:
@@ -21,7 +19,7 @@ pull_agent_installer:
run_installer: run_installer:
cmd.run: cmd.run:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
- cwd: /opt/so - cwd: /opt/so
- retry: - retry:
attempts: 3 attempts: 3
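Note: the guard differs on the two sides — one reinstalls the agent only when the service is missing, the other also checks for the binary on disk and passes `-force` so a stale enrollment is overwritten. A rough shell equivalent of the stricter check; the systemctl probe is an approximation of salt's `service.available`, and paths come from the state above:

```
# Reinstall the agent if either the service unit or the binary is absent.
if ! systemctl cat elastic-agent >/dev/null 2>&1 ||
   [ ! -x /opt/Elastic/Agent/elastic-agent ]; then
  cd /opt/so && ./so-elastic-agent_linux_amd64 -token="$GRIDNODETOKEN" -force
fi
```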

View File

@@ -21,7 +21,6 @@
'azure_application_insights.app_state': 'azure.app_state', 'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing', 'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function', 'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset', 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm', 'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance', 'azure_metrics.container_instance': 'azure.container_instance',

View File

@@ -1,186 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_agent_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetagentkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ CA.server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 947
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 947
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
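Note: the state removed above follows the usual Salt x509 pattern — `private_key_managed` regenerates the key only as a `prereq` of the certificate being reissued, and `days_remaining: 7` triggers that reissue a week before expiry. Two spot-checks for the certs it manages (paths from the state; `-ext` needs OpenSSL 1.1.1+):

```
# Confirm the SANs and the renewal window on the issued certs.
openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -ext subjectAltName
openssl x509 -in /etc/pki/elasticfleet-agent.crt -noout -enddate -checkend $((7*86400))
```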

View File

@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
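Note: the two sides of this hunk differ only in curl hardening — `--retry 3 --retry-delay 30` rides out transient Fleet hiccups, and `--fail` turns HTTP errors into a non-zero exit instead of feeding an error page to the `jq` checksum guard. Minimal sketch of the pattern (FLEET_URL is a stand-in for the full agents query above):

```
# Hardened Fleet API call; the || branch takes over the checksum bail-out.
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "$FLEET_URL" \
  --retry 3 --retry-delay 30 --fail 2>/dev/null) ||
  { echo "Fleet API request failed"; exit 1; }
```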

View File

@@ -26,7 +26,7 @@ function update_es_urls() {
} }
# Get current list of Fleet Elasticsearch URLs # Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list) latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do while read -r package; do
# get package details # get package details
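Note: the `jq` change reads the installed package version from a different spot in the Fleet packages response (`.installationInfo.version` on one side, `.savedObject.attributes.install_version` on the other). A tolerant filter could accept either, as a sketch:

```
# Read the installed version from whichever field the API provides.
echo "$latest_package_list" | jq '{packages: [.items[] | {
  name,
  latest_version: .version,
  installed_version: (.installationInfo.version // .savedObject.attributes.install_version),
  subscription: .conditions.elastic.subscription}]}'
```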

View File

@@ -142,7 +142,7 @@ function update_kafka_outputs() {
{% if GLOBALS.pipeline == "KAFKA" %} {% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs # Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -168,7 +168,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %} {% else %}
# Get current list of Logstash Outputs # Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -241,11 +241,9 @@ printf '%s\n'\
"" >> "$global_pillar_file" "" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State # Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node # Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0 exit 0

View File

@@ -23,7 +23,7 @@ function update_fleet_urls() {
} }
# Get current list of Fleet Server URLs # Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg KAFKACA "$KAFKACA" \ --arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\ --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \ --argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
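Note: both hunks in this script change only the shape of the topic field in the Kafka output payload — a bare `topic` string on one side, a `topics` array of objects on the other — to match what the targeted Kibana Fleet API expects. The two request-body fragments in isolation (values copied from the payloads above):

```
jq -n '{topic: "default-securityonion"}'              # one schema generation
jq -n '{topics: [{topic: "default-securityonion"}]}'  # the other
```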

View File

@@ -26,14 +26,14 @@ catrustscript:
GLOBALS: {{ GLOBALS }} GLOBALS: {{ GLOBALS }}
{% endif %} {% endif %}
elasticsearch_cacerts: cacertz:
file.managed: file.managed:
- name: /opt/so/conf/ca/cacerts - name: /opt/so/conf/ca/cacerts
- source: salt://elasticsearch/cacerts - source: salt://elasticsearch/cacerts
- user: 939 - user: 939
- group: 939 - group: 939
elasticsearch_capems: capemz:
file.managed: file.managed:
- name: /opt/so/conf/ca/tls-ca-bundle.pem - name: /opt/so/conf/ca/tls-ca-bundle.pem
- source: salt://elasticsearch/tls-ca-bundle.pem - source: salt://elasticsearch/tls-ca-bundle.pem

View File

@@ -5,6 +5,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
include:
- ssl
- elasticsearch.ca
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

View File

@@ -1,6 +1,6 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 9.0.8 version: 8.18.8
index_clean: true index_clean: true
config: config:
action: action:
@@ -857,11 +857,53 @@ elasticsearch:
composed_of: composed_of:
- agent-mappings - agent-mappings
- dtc-agent-mappings - dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- container-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings
- dtc-event-mappings
- file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings - host-mappings
- dtc-host-mappings - dtc-host-mappings
- http-mappings - http-mappings
- dtc-http-mappings - dtc-http-mappings
- log-mappings
- metadata-mappings - metadata-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- threat-mappings
- tls-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
data_stream: data_stream:

View File

@@ -14,9 +14,6 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include: include:
- ca
- elasticsearch.ca
- elasticsearch.ssl
- elasticsearch.config - elasticsearch.config
- elasticsearch.sostatus - elasticsearch.sostatus
@@ -64,7 +61,11 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
@@ -81,21 +82,22 @@ so-elasticsearch:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca - file: cacertz
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- require: - require:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- file: eslog4jfile - file: eslog4jfile
- file: nsmesdir - file: nsmesdir
- file: eslogdir - file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms - file: elasticp12perms
{% if GLOBALS.is_manager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- cmd: auth_users_roles_inode - cmd: auth_users_roles_inode
- cmd: auth_users_inode - cmd: auth_users_inode

View File

@@ -1,90 +1,9 @@
{ {
"description": "kratos", "description" : "kratos",
"processors": [ "processors" : [
{ {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
"set": { {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
"field": "audience", {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
"value": "access", { "pipeline": { "name": "common" } }
"override": false, ]
"ignore_failure": true
}
},
{
"set": {
"field": "event.dataset",
"ignore_empty_value": true,
"ignore_failure": true,
"value": "kratos.{{{audience}}}",
"media_type": "text/plain"
}
},
{
"set": {
"field": "event.action",
"ignore_failure": true,
"copy_from": "msg"
}
},
{
"rename": {
"field": "http_request",
"target_field": "http.request",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http_response",
"target_field": "http.response",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.path",
"target_field": "http.uri",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.query",
"target_field": "http.query",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.headers.user-agent",
"target_field": "http.useragent",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "common"
}
}
]
} }
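Note: beyond reformatting, the expanded side of this pipeline adds `rename` processors that move `http_request`/`http_response` onto `http.*` fields before handing off to the `common` pipeline (the `http.request.method` rename appears twice; the second occurrence is a no-op). One way to exercise either version before shipping it, assuming direct Elasticsearch access with the same curl.config used elsewhere in this diff:

```
# Simulate the stored kratos pipeline against a sample audit record.
curl -sK /opt/so/conf/elasticsearch/curl.config -XPOST \
  'localhost:9200/_ingest/pipeline/kratos/_simulate' \
  -H 'Content-Type: application/json' \
  -d '{"docs":[{"_source":{"msg":"login succeeded","http_request":{"method":"POST","path":"/login"}}}]}'
```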

View File

@@ -1,66 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
# Create a cert for elasticsearch
elasticsearch_key:
x509.private_key_managed:
- name: /etc/pki/elasticsearch.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticsearch_crt:
x509.certificate_managed:
- name: /etc/pki/elasticsearch.crt
- ca_server: {{ CA.server }}
- signing_policy: registry
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
- x509: /etc/pki/elasticsearch.key
elastickeyperms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
elasticp12perms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.p12
- mode: 640
- group: 930
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -14,9 +14,8 @@ set -e
# Check to see if we have extracted the ca cert. # Check to see if we have extracted the ca cert.
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
# Make sure symbolic links are followed when copying from container docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker rm so-elasticsearchca docker rm so-elasticsearchca
echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -121,7 +121,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "Loading Security Onion index templates..." echo "Loading Security Onion index templates..."
shopt -s extglob shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %} {% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)" pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %} {% else %}
pattern="*" pattern="*"
{% endif %} {% endif %}

View File

@@ -9,6 +9,7 @@
include: include:
- salt.minion - salt.minion
- ssl
# Influx DB # Influx DB
influxconfdir: influxconfdir:

View File

@@ -11,7 +11,6 @@
{% set TOKEN = salt['pillar.get']('influxdb:token') %} {% set TOKEN = salt['pillar.get']('influxdb:token') %}
include: include:
- influxdb.ssl
- influxdb.config - influxdb.config
- influxdb.sostatus - influxdb.sostatus
@@ -60,8 +59,6 @@ so-influxdb:
{% endif %} {% endif %}
- watch: - watch:
- file: influxdbconf - file: influxdbconf
- x509: influxdb_key
- x509: influxdb_crt
- require: - require:
- file: influxdbconf - file: influxdbconf
- x509: influxdb_key - x509: influxdb_key

View File

@@ -1,55 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
influxdb_key:
x509.private_key_managed:
- name: /etc/pki/influxdb.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/influxdb.key') -%}
- prereq:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Create a cert for talking to influxdb
influxdb_crt:
x509.certificate_managed:
- name: /etc/pki/influxdb.crt
- ca_server: {{ CA.server }}
- signing_policy: influxdb
- private_key: /etc/pki/influxdb.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
influxkeyperms:
file.managed:
- replace: False
- name: /etc/pki/influxdb.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -68,8 +68,6 @@ so-kafka:
- file: kafka_server_jaas_properties - file: kafka_server_jaas_properties
{% endif %} {% endif %}
- file: kafkacertz - file: kafkacertz
- x509: kafka_crt
- file: kafka_pkcs12_perms
- require: - require:
- file: kafkacertz - file: kafkacertz

View File

@@ -6,13 +6,22 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} {% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
{% set kafka_password = salt['pillar.get']('kafka:config:password') %} {% set kafka_password = salt['pillar.get']('kafka:config:password') %}
include: include:
- ca - ca.dirs
{% set global_ca_server = [] %}
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set ca_server = global_ca_server[0] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} {% if GLOBALS.pipeline == "KAFKA" %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
kafka_client_key: kafka_client_key:
x509.private_key_managed: x509.private_key_managed:
- name: /etc/pki/kafka-client.key - name: /etc/pki/kafka-client.key
@@ -30,12 +39,12 @@ kafka_client_key:
kafka_client_crt: kafka_client_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka-client.crt - name: /etc/pki/kafka-client.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka-client.key - private_key: /etc/pki/kafka-client.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -58,9 +67,9 @@ kafka_client_crt_perms:
- mode: 640 - mode: 640
- user: 960 - user: 960
- group: 939 - group: 939
{% endif %} {% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} {% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
kafka_key: kafka_key:
x509.private_key_managed: x509.private_key_managed:
- name: /etc/pki/kafka.key - name: /etc/pki/kafka.key
@@ -78,12 +87,12 @@ kafka_key:
kafka_crt: kafka_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka.crt - name: /etc/pki/kafka.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka.key - private_key: /etc/pki/kafka.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -94,7 +103,6 @@ kafka_crt:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}" - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
- onchanges: - onchanges:
- x509: /etc/pki/kafka.key - x509: /etc/pki/kafka.key
kafka_key_perms: kafka_key_perms:
file.managed: file.managed:
- replace: False - replace: False
@@ -118,11 +126,11 @@ kafka_pkcs12_perms:
- mode: 640 - mode: 640
- user: 960 - user: 960
- group: 939 - group: 939
{% endif %} {% endif %}
# Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka. # Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka.
# Manager will have cert, but be unused until a pipeline is created and logstash enabled. # Manager will have cert, but be unused until a pipeline is created and logstash enabled.
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %} {% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
kafka_logstash_key: kafka_logstash_key:
x509.private_key_managed: x509.private_key_managed:
- name: /etc/pki/kafka-logstash.key - name: /etc/pki/kafka-logstash.key
@@ -140,12 +148,12 @@ kafka_logstash_key:
kafka_logstash_crt: kafka_logstash_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka-logstash.crt - name: /etc/pki/kafka-logstash.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka-logstash.key - private_key: /etc/pki/kafka-logstash.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -181,6 +189,7 @@ kafka_logstash_pkcs12_perms:
- user: 931 - user: 931
- group: 939 - group: 939
{% endif %}
{% endif %} {% endif %}
{% else %} {% else %}
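Note: two systematic differences run through this file — `days_remaining` (with 7, Salt reissues a certificate once less than a week of validity remains; with 0 that early-renewal trigger is effectively disabled) and how the signing CA is located (a `ca/map.jinja` import versus scanning the salt mine for a manager minion). A quick host-side look at where the issued certs stand relative to the seven-day window (paths from the states above):

```
# -checkend exits non-zero if the cert expires within the window.
for c in /etc/pki/kafka.crt /etc/pki/kafka-client.crt /etc/pki/kafka-logstash.crt; do
  [ -f "$c" ] && openssl x509 -in "$c" -noout -subject -enddate -checkend $((7*86400))
done
```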

View File

@@ -25,10 +25,11 @@ kibana:
discardCorruptObjects: "8.18.8" discardCorruptObjects: "8.18.8"
telemetry: telemetry:
enabled: False enabled: False
security:
showInsecureClusterWarning: False
xpack: xpack:
security: security:
secureCookies: true secureCookies: true
showInsecureClusterWarning: false
reporting: reporting:
kibanaServer: kibanaServer:
hostname: localhost hostname: localhost

View File

@@ -75,7 +75,6 @@ kratosconfig:
- group: 928 - group: 928
- mode: 600 - mode: 600
- template: jinja - template: jinja
- show_changes: False
- defaults: - defaults:
KRATOSMERGED: {{ KRATOSMERGED }} KRATOSMERGED: {{ KRATOSMERGED }}

View File

@@ -46,7 +46,6 @@ kratos:
ui_url: https://URL_BASE/ ui_url: https://URL_BASE/
login: login:
ui_url: https://URL_BASE/login/ ui_url: https://URL_BASE/login/
lifespan: 60m
error: error:
ui_url: https://URL_BASE/login/ ui_url: https://URL_BASE/login/
registration: registration:

View File

@@ -182,10 +182,6 @@ kratos:
global: True global: True
advanced: True advanced: True
helpLink: kratos.html helpLink: kratos.html
lifespan:
description: Defines the duration that a login form will remain valid.
global: True
helpLink: kratos.html
error: error:
ui_url: ui_url:
description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation. description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation.

View File

@@ -10,10 +10,11 @@
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %} {% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} {% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
include: include:
- ssl
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch - elasticsearch
{% endif %} {% endif %}
# Create the logstash group # Create the logstash group
logstashgroup: logstashgroup:

View File

@@ -63,7 +63,7 @@ logstash:
settings: settings:
lsheap: 500m lsheap: 500m
config: config:
api_x_http_x_host: 0.0.0.0 http_x_host: 0.0.0.0
path_x_logs: /var/log/logstash path_x_logs: /var/log/logstash
pipeline_x_workers: 1 pipeline_x_workers: 1
pipeline_x_batch_x_size: 125 pipeline_x_batch_x_size: 125

View File

@@ -12,7 +12,6 @@
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %} {% set lsheap = LOGSTASH_MERGED.settings.lsheap %}
include: include:
- ca
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %} {% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch.ca - elasticsearch.ca
{% endif %} {% endif %}
@@ -21,9 +20,9 @@ include:
- kafka.ca - kafka.ca
- kafka.ssl - kafka.ssl
{% endif %} {% endif %}
- logstash.ssl
- logstash.config - logstash.config
- logstash.sostatus - logstash.sostatus
- ssl
so-logstash: so-logstash:
docker_container.running: docker_container.running:
@@ -66,18 +65,22 @@ so-logstash:
- /opt/so/log/logstash:/var/log/logstash:rw - /opt/so/log/logstash:/var/log/logstash:rw
- /sys/fs/cgroup:/sys/fs/cgroup:ro - /sys/fs/cgroup:/sys/fs/cgroup:ro
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %} {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro - /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro - /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro - /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro - /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% if GLOBALS.role != 'so-fleet' %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% endif %} {% endif %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %} {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %} {% endif %}
@@ -97,22 +100,11 @@ so-logstash:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: lsetcsync {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
- file: trusttheca
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleet_logstash_key - x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt - x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
- x509: etc_filebeat_crt
- file: logstash_filebeat_p8
{% endif %}
{% endif %} {% endif %}
- file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} {% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
- file: ls_pipeline_{{assigned_pipeline}} - file: ls_pipeline_{{assigned_pipeline}}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %} {% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
@@ -123,20 +115,17 @@ so-logstash:
- file: kafkacertz - file: kafkacertz
{% endif %} {% endif %}
- require: - require:
- file: trusttheca {% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
- x509: etc_filebeat_crt - x509: etc_filebeat_crt
- file: logstash_filebeat_p8 {% endif %}
{% endif %} {% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- file: cacertz
- file: capemz
{% endif %} {% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
- file: kafkacertz - file: kafkacertz

View File

@@ -5,10 +5,10 @@ input {
codec => es_bulk codec => es_bulk
request_headers_target_field => client_headers request_headers_target_field => client_headers
remote_host_target_field => client_host remote_host_target_field => client_host
ssl_enabled => true ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt" ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key" ssl_key => "/usr/share/logstash/filebeat.key"
ssl_client_authentication => "required" ssl_verify_mode => "peer"
} }
} }

View File

@@ -2,11 +2,11 @@ input {
elastic_agent { elastic_agent {
port => 5055 port => 5055
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ] tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
ssl_enabled => true ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt" ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
ssl_key => "/usr/share/logstash/elasticfleet-logstash.key" ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
ssl_client_authentication => "required" ssl_verify_mode => "force_peer"
ecs_compatibility => v8 ecs_compatibility => v8
} }
} }

View File

@@ -2,7 +2,7 @@ input {
elastic_agent { elastic_agent {
port => 5056 port => 5056
tags => [ "elastic-agent", "fleet-lumberjack-input" ] tags => [ "elastic-agent", "fleet-lumberjack-input" ]
ssl_enabled => true ssl => true
ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt" ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key" ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
ecs_compatibility => v8 ecs_compatibility => v8

View File

@@ -8,8 +8,8 @@ output {
document_id => "%{[metadata][_id]}" document_id => "%{[metadata][_id]}"
index => "so-ip-mappings" index => "so-ip-mappings"
silence_errors_in_log => ["version_conflict_engine_exception"] silence_errors_in_log => ["version_conflict_engine_exception"]
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
else { else {
@@ -25,8 +25,8 @@ output {
document_id => "%{[metadata][_id]}" document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}" pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"] silence_errors_in_log => ["version_conflict_engine_exception"]
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
else { else {
@@ -37,8 +37,8 @@ output {
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}" pipeline => "%{[metadata][pipeline]}"
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
} }
@@ -49,8 +49,8 @@ output {
data_stream => true data_stream => true
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
ssl_enabled => true ssl => true
ssl_verification_mode=> "none" ssl_certificate_verification => false
} }
} }
} }

View File

@@ -13,8 +13,8 @@ output {
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
index => "endgame-%{+YYYY.MM.dd}" index => "endgame-%{+YYYY.MM.dd}"
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
} }
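Note: the pipeline-config diffs above are all the same option migration — inputs move between the deprecated `ssl`/`ssl_verify_mode` and the newer Logstash names `ssl_enabled`/`ssl_client_authentication`, outputs between `ssl`/`ssl_certificate_verification` and `ssl_enabled`/`ssl_verification_mode`. A config test flags whichever names the installed Logstash no longer accepts (container name from this repo; the in-container pipeline path is an assumption):

```
docker exec so-logstash bin/logstash --config.test_and_exit \
  --path.config /usr/share/logstash/pipeline
```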

View File

@@ -56,7 +56,7 @@ logstash:
helpLink: logstash.html helpLink: logstash.html
global: False global: False
config: config:
api_x_http_x_host: http_x_host:
description: Host interface to listen to connections. description: Host interface to listen to connections.
helpLink: logstash.html helpLink: logstash.html
readonly: True readonly: True

View File

@@ -1,287 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states or sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode'] %}
# Start -- Elastic Fleet Logstash Input Cert
etc_elasticfleet_logstash_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-logstash.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-logstash.key') -%}
- prereq:
- x509: etc_elasticfleet_logstash_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-logstash.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-logstash.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_logstash_key
eflogstashperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- group: 939
chownelasticfleetlogstashcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.crt
- mode: 640
- user: 931
- group: 939
chownelasticfleetlogstashkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Logstash Input Cert
{% endif %} # endif is for not including HeavyNodes
# Start -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Cert needed on: Managers, Receivers
etc_elasticfleetlumberjack_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-lumberjack.key
- bits: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-lumberjack.key') -%}
- prereq:
- x509: etc_elasticfleetlumberjack_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleetlumberjack_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-lumberjack.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-lumberjack.key
- CN: {{ GLOBALS.node_ip }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-lumberjack.key -topk8 -out /etc/pki/elasticfleet-lumberjack.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleetlumberjack_key
eflogstashlumberjackperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- group: 939
chownilogstashelasticfleetlumberjackp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.p8
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.crt
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Node - Logstash Lumberjack Input / Output
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
x509.private_key_managed:
- name: /etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
- prereq:
- x509: etc_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_filebeat_crt:
x509.certificate_managed:
- name: /etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
- x509: etc_filebeat_key
fbperms:
file.managed:
- replace: False
- name: /etc/pki/filebeat.key
- mode: 640
- group: 939
logstash_filebeat_p8:
file.managed:
- replace: False
- name: /etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% if grains.role not in ['so-heavynode', 'so-receiver'] %}
# Create symlinks to the keys so they can be distributed to all the things
filebeatdir:
file.directory:
- name: /opt/so/saltstack/local/salt/filebeat/files
- makedirs: True
fbkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
- target: /etc/pki/filebeat.p8
- user: socore
- group: socore
fbcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
- target: /etc/pki/filebeat.crt
- user: socore
- group: socore
{% endif %}
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}
fbcertdir:
file.directory:
- name: /opt/so/conf/filebeat/etc/pki
- makedirs: True
conf_filebeat_key:
x509.private_key_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
- prereq:
- x509: conf_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
conf_filebeat_crt:
x509.certificate_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:"
- onchanges:
- x509: conf_filebeat_key
filebeatkeyperms:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- mode: 640
- group: 939
chownfilebeatp8:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
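
These states generate each node's private key, request a signed certificate from the CA server via `x509.certificate_managed`, and convert the key to PKCS#8 for Logstash. A minimal verification sketch, assuming the states above have already been applied on the node (the openssl invocations are illustrative, not part of the state file):

```
# Confirm the issued cert's subject and expiry, and that the PKCS#8 key parses cleanly.
openssl x509 -in /etc/pki/filebeat.crt -noout -subject -enddate
openssl pkey -in /etc/pki/filebeat.p8 -noout -check
# Expect mode 640 and group 939, matching the file.managed states above.
ls -l /etc/pki/filebeat.key /etc/pki/filebeat.p8
```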

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
elastic_curl_config_distributed:
file.managed:
- name: /opt/so/saltstack/local/salt/elasticsearch/curl.config

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
kibana_curl_config_distributed:
file.managed:
- name: /opt/so/conf/kibana/curl.config

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
include:
- elasticsearch.auth
- kratos

View File

@@ -716,18 +716,6 @@ function checkMine() {
}
}
function create_ca_pillar() {
local capillar=/opt/so/saltstack/local/pillar/ca/init.sls
printf '%s\n'\
"ca:"\
" server: $MINION_ID"\
" " > $capillar
if [ $? -ne 0 ]; then
log "ERROR" "Failed to add $MINION_ID to $capillar"
return 1
fi
}
function createEVAL() {
log "INFO" "Creating EVAL configuration for minion $MINION_ID"
is_pcaplimit=true
@@ -1025,7 +1013,6 @@ function setupMinionFiles() {
managers=("EVAL" "STANDALONE" "IMPORT" "MANAGER" "MANAGERSEARCH")
if echo "${managers[@]}" | grep -qw "$NODETYPE"; then
add_sensoroni_with_analyze_to_minion || return 1
create_ca_pillar || return 1
else
add_sensoroni_to_minion || return 1
fi
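
The removed `create_ca_pillar` function wrote the `ca:`/`server:` pillar that other states read through `CA.server`. On a live grid the rendered value can be checked directly; a hedged sketch (output shape depends on the deployment):

```
# Show the ca.server pillar value as the minion renders it.
sudo salt-call pillar.get ca:server
```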

View File

@@ -87,12 +87,6 @@ check_err() {
113)
echo 'No route to host'
;;
160)
echo 'Incompatible Elasticsearch upgrade'
;;
161)
echo 'Required intermediate Elasticsearch upgrade not complete'
;;
*)
echo 'Unhandled error'
echo "$err_msg"
@@ -325,19 +319,6 @@ clone_to_tmp() {
fi
}
# there is a function like this in so-minion, but we cannot source it since so-minion requires args
create_ca_pillar() {
local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
local ca_pillar_file="${ca_pillar_dir}/init.sls"
echo "Updating CA pillar configuration"
mkdir -p "$ca_pillar_dir"
echo "ca: {}" > "$ca_pillar_file"
so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
chown -R socore:socore "$ca_pillar_dir"
}
disable_logstash_heavynodes() {
c=0
printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
@@ -381,6 +362,7 @@ masterlock() {
echo "base:" > $TOPFILE echo "base:" > $TOPFILE
echo " $MINIONID:" >> $TOPFILE echo " $MINIONID:" >> $TOPFILE
echo " - ca" >> $TOPFILE echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE echo " - elasticsearch" >> $TOPFILE
} }
@@ -445,8 +427,6 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
[[ "$INSTALLEDVERSION" == 2.4.201 ]] && up_to_2.4.210
true
}
@@ -461,26 +441,24 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20 [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30 [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80 [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90 [[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100 [[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
[[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110 [[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
[[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111 [[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111
[[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120 [[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
[[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130 [[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140 [[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141 [[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150 [[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160 [[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170 [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
[[ "$POSTVERSION" == 2.4.201 ]] && post_to_2.4.210
true
}
@@ -637,6 +615,9 @@ post_to_2.4.180() {
}
post_to_2.4.190() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Only need to update import / eval nodes
if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
update_import_fleet_output
@@ -664,22 +645,6 @@ post_to_2.4.200() {
POSTVERSION=2.4.200
}
post_to_2.4.201() {
echo "Nothing to apply"
POSTVERSION=2.4.201
}
post_to_2.4.210() {
echo "Rolling over Kratos index to apply new index template"
rollover_index "logs-kratos-so"
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.210
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -941,7 +906,9 @@ up_to_2.4.180() {
}
up_to_2.4.190() {
echo "Nothing to do for 2.4.190"
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.190
}
@@ -954,20 +921,6 @@ up_to_2.4.200() {
INSTALLEDVERSION=2.4.200
}
up_to_2.4.201() {
echo "Nothing to do for 2.4.201"
INSTALLEDVERSION=2.4.201
}
up_to_2.4.210() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
create_ca_pillar
INSTALLEDVERSION=2.4.210
}
add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1659,252 +1612,6 @@ verify_latest_update_script() {
fi
}
verify_es_version_compatibility() {
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.6"]="8.18.8 9.0.8"
["8.18.8"]="9.0.8"
)
# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024"
)
# Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
exit 160
fi
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
fi
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
if [[ -f "$es_required_version_statefile" ]]; then
# required so verification script should have already been created
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script $es_verification_script
fi
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!"
# create script using version in statefile
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
required_es_upgrade_version="${compatible_versions##* }"
fi
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
# We expect to upgrade to the latest compatible minor version of ES
create_intermediate_upgrade_verification_script $es_verification_script
if [[ $is_airgap -eq 0 ]]; then
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
echo -e "\n##############################################################################################################################\n"
exit 160
else
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_version="$BRANCH"
else
local originally_requested_so_version="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
echo -e \"\n##############################################################################################################################\n\" && \
echo -e \"Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
&& timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e \"\n##############################################################################################################################\n\" \
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
fi
fi
}
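
Both version lookups that `verify_es_version_compatibility` performed can be reproduced by hand when debugging an upgrade path. A sketch using the same commands the function called ($UPDATE_DIR is whatever soup set it to):

```
# Current cluster version, as derived above:
so-elasticsearch-query / --fail --retry 5 --retry-delay 10 | jq -r '.version.number'
# Pinned target version in the update tree:
so-yaml.py get "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" elasticsearch.version | sed -n '1p'
```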
create_intermediate_upgrade_verification_script() {
# After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step
local verification_script="$1"
cat << 'EOF' > "$verification_script"
#!/bin/bash
SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
EXPECTED_ES_VERSION="$1"
if [[ -z "$EXPECTED_ES_VERSION" ]]; then
echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
exit 1
fi
if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
fi
check_heavynodes_es_version() {
# Check if heavynodes are in this grid
if ! salt-key -l accepted | grep -q 'heavynode$'; then
# No heavynodes, skip version check
echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
return 0
fi
echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
local exit_status=$?
# Check that all heavynodes returned good data
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$heavynode_versions" | jq -s 'add'
echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
return 1
}
check_searchnodes_es_version() {
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
local exit_status=$?
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
return 1
}
# Need to add a check for heavynodes and ensure all heavynodes get their own "cluster" upgraded before moving on to final upgrade.
check_searchnodes_es_version || exit 1
check_heavynodes_es_version || exit 1
# Remove required version state file after successful verification
rm -f "$2"
exit 0
EOF
}
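
The searchnode half of the generated verification script reduces to a single query plus a jq predicate; a standalone sketch of that check (the expected version here is a placeholder argument, as in the script):

```
EXPECTED_ES_VERSION="9.0.8"   # placeholder; the generated script receives this as $1
so-elasticsearch-query _nodes/_all/version --fail \
  | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status \
       '.nodes | to_entries | all(.[].value.version; . == $expected)' \
  && echo "all searchnodes at $EXPECTED_ES_VERSION"
```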
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -1931,7 +1638,7 @@ apply_hotfix() {
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
@@ -2001,8 +1708,6 @@ main() {
echo "Verifying we have the latest soup script." echo "Verifying we have the latest soup script."
verify_latest_update_script verify_latest_update_script
verify_es_version_compatibility
echo "Let's see if we need to update Security Onion." echo "Let's see if we need to update Security Onion."
upgrade_check upgrade_check
upgrade_space upgrade_space
@@ -2130,7 +1835,7 @@ main() {
echo "" echo ""
echo "Running a highstate. This could take several minutes." echo "Running a highstate. This could take several minutes."
set +e set +e
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate highstate
set -e set -e
@@ -2143,7 +1848,7 @@ main() {
check_saltmaster_status
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
# Stop long-running scripts to allow potentially updated scripts to load on the next execution.
killall salt-relay.sh

View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Drop the correct nginx config based on role
nginxconfdir:
file.directory:

View File

@@ -8,14 +8,81 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% set ca_server = GLOBALS.minion_id %}
include:
- nginx.ssl
- nginx.config
- nginx.sostatus
{% if GLOBALS.role != 'so-fleet' %}
{% set container_config = 'so-nginx' %}
{% if grains.role not in ['so-fleet'] %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
make-rule-dir-nginx:
file.directory:
- name: /nsm/rules
@@ -26,11 +93,15 @@ make-rule-dir-nginx:
- group
- show_changes: False
{% else %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% set container_config = 'so-nginx-fleet-node' %}
{% endif %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% if GLOBALS.role == 'so-fleet' %}
{% set container_config = 'so-nginx-fleet-node' %}
{% else %}
{% set container_config = 'so-nginx' %}
{% endif %}
so-nginx:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
@@ -83,27 +154,18 @@ so-nginx:
- watch:
- file: nginxconf
- file: nginxconfdir
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
- file: managerssl_key
- file: managerssl_crt
{% else %}
- x509: managerssl_key
- x509: managerssl_crt
{% endif%}
{% endif %}
- require:
- file: nginxconf
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
- file: managerssl_key
- file: managerssl_crt
{% else %}
- x509: managerssl_key
- x509: managerssl_crt
{% endif%}
- file: navigatorconfig
{% endif %}
delete_so-nginx_so-status.disabled:
file.uncomment:
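
With the managerssl key and cert now managed inline here, the certificate so-nginx actually serves can be inspected with an openssl probe. A hedged sketch (the hostname is a placeholder for the manager's FQDN):

```
# Print the served cert's subject, SANs and validity window.
openssl s_client -connect <manager-hostname>:443 -servername <manager-hostname> </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -ext subjectAltName -dates
```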

View File

@@ -1,87 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.role != 'so-fleet' %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ CA.server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,22 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states%}
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
- makedirs: True
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -57,6 +57,12 @@ stenoconf:
PCAPMERGED: {{ PCAPMERGED }}
STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
pcaptmpdir:
file.directory:
- name: /nsm/pcaptmp

View File

@@ -10,7 +10,6 @@
include:
- pcap.ca
- pcap.config
- pcap.sostatus

View File

@@ -7,6 +7,9 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'redis/map.jinja' import REDISMERGED %}
include:
- ssl
# Redis Setup
redisconfdir:
file.directory:

View File

@@ -9,8 +9,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- ca
- redis.ssl
- redis.config
- redis.sostatus
@@ -33,7 +31,11 @@ so-redis:
- /nsm/redis/data:/data:rw
- /etc/pki/redis.crt:/certs/redis.crt:ro
- /etc/pki/redis.key:/certs/redis.key:ro
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/certs/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro
{% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }}
@@ -53,14 +55,16 @@ so-redis:
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: trusttheca
- x509: redis_crt
- x509: redis_key
- file: /opt/so/conf/redis/etc
- require:
- file: trusttheca
- file: redisconf
- x509: redis_crt
- x509: redis_key
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
delete_so-redis_so-status.disabled:
file.uncomment:

View File

@@ -1,54 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
redis_key:
x509.private_key_managed:
- name: /etc/pki/redis.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/redis.key') -%}
- prereq:
- x509: /etc/pki/redis.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
redis_crt:
x509.certificate_managed:
- name: /etc/pki/redis.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: registry
- private_key: /etc/pki/redis.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
rediskeyperms:
file.managed:
- replace: False
- name: /etc/pki/redis.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
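
The removed redis.ssl state managed /etc/pki/redis.key and /etc/pki/redis.crt as a pair. Whether an existing pair still matches can be confirmed by comparing public keys; a minimal sketch:

```
# The two digests should be identical if the cert was issued for this key.
openssl x509 -in /etc/pki/redis.crt -noout -pubkey | openssl sha256
openssl pkey -in /etc/pki/redis.key -pubout | openssl sha256
```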

View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- registry.ssl
- registry.config
- registry.sostatus
@@ -54,9 +53,6 @@ so-dockerregistry:
- retry:
attempts: 5
interval: 30
- watch:
- x509: registry_crt
- x509: registry_key
- require:
- file: dockerregistryconf
- x509: registry_crt

View File

@@ -1,77 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
include:
- ca
# Delete directory if it exists at the key path
registry_key_cleanup:
file.absent:
- name: /etc/pki/registry.key
- onlyif:
- test -d /etc/pki/registry.key
registry_key:
x509.private_key_managed:
- name: /etc/pki/registry.key
- keysize: 4096
- backup: True
- new: True
- require:
- file: registry_key_cleanup
{% if salt['file.file_exists']('/etc/pki/registry.key') -%}
- prereq:
- x509: /etc/pki/registry.crt
{%- endif %}
- retry:
attempts: 15
interval: 10
# Delete directory if it exists at the crt path
registry_crt_cleanup:
file.absent:
- name: /etc/pki/registry.crt
- onlyif:
- test -d /etc/pki/registry.crt
# Create a cert for the docker registry
registry_crt:
x509.certificate_managed:
- name: /etc/pki/registry.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.manager }}, IP:{{ GLOBALS.manager_ip }}
- signing_policy: registry
- private_key: /etc/pki/registry.key
- CN: {{ GLOBALS.manager }}
- days_remaining: 7
- days_valid: 820
- backup: True
- require:
- file: registry_crt_cleanup
- timeout: 30
- retry:
attempts: 15
interval: 10
regkeyperms:
file.managed:
- replace: False
- name: /etc/pki/registry.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
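
Even with registry.ssl folded into the shared ssl state, the registry still listens on port 5000 (see the image references above). A curl probe verifies both the TLS chain and the registry API; a sketch with the manager hostname as a placeholder:

```
# A 200 (or 401) from /v2/ means the TLS handshake and registry are healthy.
curl --cacert /etc/pki/tls/certs/intca.crt -sS -o /dev/null -w '%{http_code}\n' \
  https://<manager-hostname>:5000/v2/
```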

View File

@@ -46,6 +46,33 @@ def start(interval=60):
mine_update(minion)
continue
# if a manager, check that the ca is in the mine and it is correct
if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
try:
ca_crt = x509[minion]['/etc/pki/ca.crt']
log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
# since the cert is defined, make sure it is valid
import salt.modules.x509_v2 as x509_v2
if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
log.error('checkmine engine: found minion %s doesn\'t have a valid ca_crt in the mine' % (minion))
log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
mine_delete(minion, 'x509.get_pem_entries')
mine_update(minion)
continue
else:
log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
except IndexError:
log.error('checkmine engine: found minion %s doesn\'t have a ca_crt in the mine' % (minion))
mine_delete(minion, 'x509.get_pem_entries')
mine_update(minion)
continue
except KeyError:
log.error('checkmine engine: found minion %s is not in the mine' % (minion))
mine_flush(minion)
mine_update(minion)
continue
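
The new checkmine logic reads the CA certificate out of the Salt mine via the mine.get runner; the same lookup is handy when debugging a minion the engine keeps flushing. A sketch (the minion ID is a placeholder):

```
# Run on the manager; shows the PEM entries the engine validates.
sudo salt-run mine.get '<manager-minion-id>' x509.get_pem_entries
```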
# Update the mine if the ip in the mine doesn't match returned from manage.alived
network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
try:

View File

@@ -18,6 +18,10 @@ mine_functions:
mine_functions:
network.ip_addrs:
- interface: {{ interface }}
{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif %}
mine_update_mine_functions:
module.run:
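
A changed mine_functions block only takes effect once the minion repopulates its mine, which the mine_update_mine_functions state above triggers; the same refresh can be forced manually. A sketch:

```
# Re-run the minion's mine_functions and push the results to the master.
sudo salt-call mine.update
```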

View File

@@ -17,8 +17,8 @@ include:
- repo.client
- salt.mine_functions
- salt.minion.service_file
{% if GLOBALS.is_manager %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
- ca.signing_policy
- ca
{% endif %}
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -111,7 +111,7 @@ salt_minion_service:
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels
{% endif %}
{% if GLOBALS.is_manager %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
- file: signing_policy
- file: /etc/salt/minion.d/signing_policies.conf
{% endif %}
- order: last

View File

@@ -8,9 +8,6 @@
include:
{% if GLOBALS.is_sensor or GLOBALS.role == 'so-import' %}
- pcap.ca
{% endif %}
- sensoroni.config
- sensoroni.sostatus
@@ -19,9 +16,7 @@ so-sensoroni:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
- network_mode: host
- binds:
{% if GLOBALS.is_sensor or GLOBALS.role == 'so-import' %}
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
{% endif %}
- /nsm/pcap:/nsm/pcap:rw
- /nsm/import:/nsm/import:rw
- /nsm/pcapout:/nsm/pcapout:rw

View File

@@ -1,91 +0,0 @@
Onion AI Session Report
==========================
## Session Details
**Session ID:** {{.Session.SessionId}}
**Title:** {{.Session.Title}}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.UpdateTime}}
{{ if .Session.DeleteTime }}
**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.DeleteTime}}
{{ end }}
**User ID:** {{getUserDetail "email" .Session.UserId}}
## Session Usage
**Total Input Tokens** {{.Session.Usage.TotalInputTokens}}
**Total Output Tokens** {{.Session.Usage.TotalOutputTokens}}
**Total Credits:** {{.Session.Usage.TotalCredits}}
**Total Messages:** {{.Session.Usage.TotalMessages}}
## Messages
{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" .History }}
#### Message {{ add $index 1 }}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $msg.CreateTime}}
**User ID:** {{getUserDetail "email" $msg.UserId}}
**Role:** {{$msg.Message.Role}}
{{ range $i, $block := $msg.Message.ContentBlocks }}
---
{{ if eq $block.Type "text" }}
**Text:** {{ stripEmoji $block.Text }}
{{ else if eq $block.Type "tool_use" }}
**Tool:** {{ $block.Name }}
{{ if $block.Input }}
**Parameters:**
{{ range $key, $value := parseJSON $block.Input }}
{{ if eq $key "limit" }}- {{ $key }}: {{ $value }}
{{ else }}- {{ $key }}: "{{ $value }}"
{{ end }}{{ end }}{{ end }}
{{ else if $block.ToolResult }}
**Tool Result:**
{{ if $block.ToolResult.Content }}
{{ range $j, $contentBlock := $block.ToolResult.Content }}
{{ if gt $j 0 }}
---
{{ end }}
{{ if $contentBlock.Text }}
{{ if $block.ToolResult.IsError }}
**Error:** {{ $contentBlock.Text }}
{{ else }}
{{ $contentBlock.Text }}
{{ end }}
{{ else if $contentBlock.Json }}
```json
{{ toJSON $contentBlock.Json }}
```
{{ end }}{{ end }}
{{ end }}{{ end }}{{ end }}
{{ if eq $msg.Message.Role "assistant" }}{{ if $msg.Message.Usage }}
---
**Message Usage:**
- Input Tokens: {{$msg.Message.Usage.InputTokens}}
- Output Tokens: {{$msg.Message.Usage.OutputTokens}}
- Credits: {{$msg.Message.Usage.Credits}}
{{end}}{{end}}
---
{{end}}

View File

@@ -131,41 +131,3 @@ Security Onion Case Report
{{ range sortHistory "CreateTime" "asc" .History -}}
| {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}} | {{getUserDetail "email" .UserId}} | {{.Kind}} | {{.Operation}} |
{{end}}
## Attached Onion AI Sessions
{{ range $idx, $session := sortAssistantSessionDetails "CreateTime" "desc" .AssistantSessions }}
#### Session {{ add $idx 1 }}
**Session ID:** {{$session.Session.SessionId}}
**Title:** {{$session.Session.Title}}
**User ID:** {{getUserDetail "email" $session.Session.UserId}}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.UpdateTime}}
{{ if $session.Session.DeleteTime }}
**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.DeleteTime}}
{{ end }}
#### Messages
{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" $session.History }}
{{ range $i, $block := $msg.Message.ContentBlocks }}
{{ if eq $block.Type "text" }}
**Role:** {{$msg.Message.Role}}
{{ stripEmoji $block.Text }}
---
{{ end }}{{ end }}
{{end}}
{{end}}

View File

@@ -357,7 +357,7 @@ sensoroni:
reports:
standard:
case_report__md:
title: Case Report Template
title: Case report Template
description: The template used when generating a case report. Supports markdown format.
file: True
global: True
@@ -370,13 +370,6 @@ sensoroni:
global: True
syntax: md
helpLink: reports.html
assistant_session_report__md:
title: Assistant Session Report Template
description: The template used when generating an assistant session report. Supports markdown format.
file: True
global: True
syntax: md
helpLink: reports.html
custom:
generic_report1__md:
title: Custom Report 1

Some files were not shown because too many files have changed in this diff.