Compare commits


1 Commit

Author: Mike Reeves
SHA1: cef71b41e2
Message: Update defaults.yaml to modify model entries (removed display price from Claude Sonnet 4.5 and deleted the QWEN 235B model)
Date: 2026-02-09 11:46:50 -05:00
118 changed files with 1776 additions and 2917 deletions

View File

@@ -33,8 +33,6 @@ body:
- 2.4.180 - 2.4.180
- 2.4.190 - 2.4.190
- 2.4.200 - 2.4.200
- 2.4.201
- 2.4.210
- Other (please provide detail below) - Other (please provide detail below)
validations: validations:
required: true required: true

View File

@@ -1 +1 @@
2.4.210 2.4.201

View File

@@ -1,2 +0,0 @@
ca:
server:

View File

@@ -1,6 +1,5 @@
base: base:
'*': '*':
- ca
- global.soc_global - global.soc_global
- global.adv_global - global.adv_global
- docker.soc_docker - docker.soc_docker

View File

@@ -15,7 +15,11 @@
'salt.minion-check', 'salt.minion-check',
'sensoroni', 'sensoroni',
'salt.lasthighstate', 'salt.lasthighstate',
'salt.minion', 'salt.minion'
] %}
{% set ssl_states = [
'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'schedule', 'schedule',
@@ -24,7 +28,7 @@
{% set manager_states = [ {% set manager_states = [
'salt.master', 'salt.master',
'ca.server', 'ca',
'registry', 'registry',
'manager', 'manager',
'nginx', 'nginx',
@@ -71,24 +75,28 @@
{# Map role-specific states #} {# Map role-specific states #}
{% set role_states = { {% set role_states = {
'so-eval': ( 'so-eval': (
ssl_states +
manager_states + manager_states +
sensor_states + sensor_states +
elastic_stack_states | reject('equalto', 'logstash') | list + elastic_stack_states | reject('equalto', 'logstash') | list
['logstash.ssl']
), ),
'so-heavynode': ( 'so-heavynode': (
ssl_states +
sensor_states + sensor_states +
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx'] ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
), ),
'so-idh': ( 'so-idh': (
ssl_states +
['idh'] ['idh']
), ),
'so-import': ( 'so-import': (
ssl_states +
manager_states + manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list + sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager'] ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
), ),
'so-manager': ( 'so-manager': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -96,6 +104,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managerhype': ( 'so-managerhype': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] + ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
stig_states + stig_states +
@@ -103,6 +112,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managersearch': ( 'so-managersearch': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -110,10 +120,12 @@
elastic_stack_states elastic_stack_states
), ),
'so-searchnode': ( 'so-searchnode': (
ssl_states +
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] + ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
stig_states stig_states
), ),
'so-standalone': ( 'so-standalone': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
sensor_states + sensor_states +
@@ -122,24 +134,29 @@
elastic_stack_states elastic_stack_states
), ),
'so-sensor': ( 'so-sensor': (
ssl_states +
sensor_states + sensor_states +
['nginx'] + ['nginx'] +
stig_states stig_states
), ),
'so-fleet': ( 'so-fleet': (
ssl_states +
stig_states + stig_states +
['logstash', 'nginx', 'healthcheck', 'elasticfleet'] ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
), ),
'so-receiver': ( 'so-receiver': (
ssl_states +
kafka_states + kafka_states +
stig_states + stig_states +
['logstash', 'redis'] ['logstash', 'redis']
), ),
'so-hypervisor': ( 'so-hypervisor': (
ssl_states +
stig_states + stig_states +
['hypervisor', 'libvirt'] ['hypervisor', 'libvirt']
), ),
'so-desktop': ( 'so-desktop': (
['ssl', 'docker_clean', 'telegraf'] +
stig_states stig_states
) )
} %} } %}
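
Note: the right-hand column factors the TLS bootstrap out of the shared list into a dedicated ssl_states list that each role prepends. A quick way to confirm what a given node would actually apply is a dry run from the minion; a minimal sketch, assuming a standard Security Onion minion with salt-call available:

# Show the role grain the role_states map keys off
salt-call grains.get role
# List the states targeted at this minion without applying them
salt-call state.show_top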

salt/ca/dirs.sls (new file, +4)
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True

View File

@@ -3,10 +3,70 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
{% if GLOBALS.is_manager %} - ca.dirs
- ca.server
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 0
- backup: True
- replace: False
- require:
- sls: ca.dirs
- timeout: 30
- retry:
attempts: 5
interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %} {% endif %}
- ca.trustca
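
For reference, the CA managed above can be sanity-checked from the manager once the state has run; a minimal sketch, assuming the default /etc/pki paths used in this state:

# Confirm subject, validity window, and CA basic constraints
openssl x509 -in /etc/pki/ca.crt -noout -subject -dates -ext basicConstraints
# cakeyperms should leave the key at mode 640 with group 939
stat -c '%a %g' /etc/pki/ca.key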

View File

@@ -1,3 +0,0 @@
{% set CA = {
'server': pillar.ca.server
}%}

View File

@@ -1,35 +1,7 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one pki_private_key:
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
{% if setup_running%}
include:
- ssl.remove
remove_pki_private_key:
file.absent: file.absent:
- name: /etc/pki/ca.key - name: /etc/pki/ca.key
remove_pki_public_ca_crt: pki_public_ca_crt:
file.absent: file.absent:
- name: /etc/pki/ca.crt - name: /etc/pki/ca.crt
remove_trusttheca:
file.absent:
- name: /etc/pki/tls/certs/intca.crt
remove_pki_public_ca_crt_symlink:
file.absent:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
{% else %}
so-setup_not_running:
test.show_notification:
- text: "This state is reserved for usage during so-setup."
{% endif %}
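
The Jinja gate at the top of this state reduces to a simple process check; a minimal sketch of the same test in shell, assuming so-setup is the name of the running setup process:

if pgrep -x so-setup >/dev/null; then
  echo "so-setup is running: the CA/SSL cleanup states above may execute"
else
  echo "so-setup not running: the state only renders the notification"
fi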

View File

@@ -1,63 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 7
- backup: True
- replace: False
- timeout: 30
- retry:
attempts: 5
interval: 30
pki_public_ca_crt_symlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
- target: /etc/pki/ca.crt
- require:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,15 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# when the salt-minion signs the cert, a copy is stored here
issued_certs_copypath:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
signing_policy:
file.managed:
- name: /etc/salt/minion.d/signing_policies.conf
- source: salt://ca/files/signing_policies.conf

View File

@@ -1,26 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- docker
# Trust the CA
trusttheca:
file.managed:
- name: /etc/pki/tls/certs/intca.crt
- source: salt://ca/files/ca.crt
- watch_in:
- service: docker_running
- show_changes: False
- makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}
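
Once trusttheca has distributed the cert, it can be verified on any node; a minimal sketch, assuming the paths used in this state:

# Confirm the distributed CA copy parses as a certificate
openssl x509 -in /etc/pki/tls/certs/intca.crt -noout -subject
# On Debian-family hosts, symlinkca should also have created this link
readlink -f /etc/ssl/certs/intca.crt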

View File

@@ -177,7 +177,7 @@ so-status_script:
- source: salt://common/tools/sbin/so-status - source: salt://common/tools/sbin/so-status
- mode: 755 - mode: 755
{% if GLOBALS.is_sensor %} {% if GLOBALS.role in GLOBALS.sensor_roles %}
# Add sensor cleanup # Add sensor cleanup
so-sensor-clean: so-sensor-clean:
cron.present: cron.present:

View File

@@ -404,25 +404,6 @@ is_single_node_grid() {
grep "role: so-" /etc/salt/grains | grep -E "eval|standalone|import" &> /dev/null grep "role: so-" /etc/salt/grains | grep -E "eval|standalone|import" &> /dev/null
} }
initialize_elasticsearch_indices() {
local index_names=$1
local default_entry=${2:-'{"@timestamp":"0"}'}
for idx in $index_names; do
if ! so-elasticsearch-query "$idx" --fail --retry 3 --retry-delay 30 >/dev/null 2>&1; then
echo "Index does not already exist. Initializing $idx index."
if retry 3 10 "so-elasticsearch-query "$idx/_doc" -d '$default_entry' -XPOST --fail 2>/dev/null" '"successful":1'; then
echo "Successfully initialized $idx index."
else
echo "Failed to initialize $idx index after 3 attempts."
fi
else
echo "Index $idx already exists. No action needed."
fi
done
}
lookup_bond_interfaces() { lookup_bond_interfaces() {
cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g" cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g"
} }
@@ -574,38 +555,20 @@ run_check_net_err() {
wait_for_salt_minion() { wait_for_salt_minion() {
local minion="$1" local minion="$1"
local max_wait="${2:-30}" local timeout="${2:-5}"
local interval="${3:-2}" local logfile="${3:-'/dev/stdout'}"
local logfile="${4:-'/dev/stdout'}" retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local elapsed=0 local attempt=0
# each attempt takes about 15 seconds
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..." local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
while [ $elapsed -lt $max_wait ]; do attempt=$((attempt+1))
# Check if service is running if [[ $attempt -eq $maxAttempts ]]; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
if ! systemctl is-active --quiet salt-minion; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
sleep $interval
elapsed=$((elapsed + interval))
continue
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
# Check if minion responds to ping
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
return 0
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
sleep $interval
elapsed=$((elapsed + interval))
done
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
return 1 return 1
fi
sleep 10
done
return 0
} }
salt_minion_count() { salt_minion_count() {
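
The left-hand version of wait_for_salt_minion polls the service and a test.ping on an elapsed-time budget; a standalone sketch of that loop, with the minion ID and timings as illustrative values:

minion="example_standalone"; max_wait=30; interval=2; elapsed=0
while [ "$elapsed" -lt "$max_wait" ]; do
  # Ready means: service active and the minion answers a ping
  if systemctl is-active --quiet salt-minion && \
     salt "$minion" test.ping --timeout=3 --out=json 2>/dev/null | grep -q true; then
    echo "salt-minion '$minion' ready after ${elapsed}s"; break
  fi
  sleep "$interval"; elapsed=$((elapsed + interval))
done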

View File

@@ -129,8 +129,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -161,9 +159,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occuring BEFORE kratos index has rolled over in 2.4.210)
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -227,7 +223,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
fi fi
RESULT=0 RESULT=0
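
EXCLUDED_ERRORS accumulates an extended-regex alternation; presumably the script applies it as a negative filter over candidate error lines, along these lines (the log path and seed pattern are illustrative):

EXCLUDED_ERRORS="responded with status-code 503|process_cluster_event_timeout_exception"
grep -riE 'error|fail' /opt/so/log 2>/dev/null | grep -vE "$EXCLUDED_ERRORS" | head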

View File

@@ -3,16 +3,29 @@
{# we only want this state to run if it is CentOS #} {# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'OEL' %} {% if GLOBALS.os == 'OEL' %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set manager = GLOBALS.manager %}
{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set trusttheca_text = global_ca_text[0] %}
{% set ca_server = global_ca_server[0] %}
trusted_ca: trusted_ca:
file.managed: x509.pem_managed:
- name: /etc/pki/ca-trust/source/anchors/ca.crt - name: /etc/pki/ca-trust/source/anchors/ca.crt
- source: salt://ca/files/ca.crt - text: {{ trusttheca_text }}
update_ca_certs: update_ca_certs:
cmd.run: cmd.run:
- name: update-ca-trust - name: update-ca-trust
- onchanges: - onchanges:
- file: trusted_ca - x509: trusted_ca
{% else %} {% else %}
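
The right-hand version sources the CA from the Salt mine rather than the salt:// fileserver. What the mine holds can be inspected from any minion; a minimal sketch, with the target glob as an illustrative value:

# Fetch mined PEM entries from manager-like minions
salt-call mine.get 'manager*' x509.get_pem_entries --out=json
# After the anchor file changes, the state refreshes the store with:
update-ca-trust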

View File

@@ -6,9 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# docker service requires the ca.crt # include ssl since docker service requires the intca
include: include:
- ca - ssl
dockergroup: dockergroup:
group.present: group.present:
@@ -89,9 +89,10 @@ docker_running:
- enable: True - enable: True
- watch: - watch:
- file: docker_daemon - file: docker_daemon
- x509: trusttheca
- require: - require:
- file: docker_daemon - file: docker_daemon
- file: trusttheca - x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present # Reserve OS ports for Docker proxy in case boot settings are not already applied/present

View File

@@ -60,7 +60,7 @@ so-elastalert:
- watch: - watch:
- file: elastaconf - file: elastaconf
- onlyif: - onlyif:
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #} - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
delete_so-elastalert_so-status.disabled: delete_so-elastalert_so-status.disabled:
file.uncomment: file.uncomment:
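
The onlyif guard above can be exercised by hand with Security Onion's query wrapper; a minimal sketch:

# Prints the Elasticsearch major version; the state runs only on a match
so-elasticsearch-query / | jq -r '.version.number[0:1]'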

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ca
- elasticagent.config - elasticagent.config
- elasticagent.sostatus - elasticagent.sostatus
@@ -56,10 +55,8 @@ so-elastic-agent:
{% endif %} {% endif %}
- require: - require:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
- watch: - watch:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
delete_so-elastic-agent_so-status.disabled: delete_so-elastic-agent_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -3,7 +3,7 @@
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
id: aea1ba80-1065-11ee-a369-97538913b6a9 id: aea1ba80-1065-11ee-a369-97538913b6a9
revision: 4 revision: 1
outputs: outputs:
default: default:
type: elasticsearch type: elasticsearch
@@ -22,133 +22,242 @@ agent:
metrics: false metrics: false
features: {} features: {}
inputs: inputs:
- id: filestream-filestream-85820eb0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-fefef78c-422f-4cfa-8abf-4cd1b9428f62
name: import-suricata-logs name: import-evtx-logs
revision: 3 revision: 2
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 85820eb0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: fefef78c-422f-4cfa-8abf-4cd1b9428f62
streams: streams:
- id: filestream-filestream.generic-85820eb0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-fefef78c-422f-4cfa-8abf-4cd1b9428f62
data_stream: data_stream:
dataset: import dataset: import
paths: paths:
- /nsm/import/*/suricata/eve*.json - /nsm/import/*/evtx/*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
ignore_older: 72h
clean_inactive: -1
parsers: null
processors:
- add_fields:
target: event
fields:
category: network
module: suricata
imported: true
- dissect:
tokenizer: /nsm/import/%{import.id}/suricata/%{import.file}
field: log.file.path
target_prefix: ''
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-86b4e960-25ef-11f0-a18d-1b26f69b8310
name: import-zeek-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 86b4e960-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-86b4e960-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: import
paths:
- /nsm/import/*/zeek/logs/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
processors: processors:
- dissect: - dissect:
tokenizer: /nsm/import/%{import.id}/zeek/logs/%{import.file}
field: log.file.path field: log.file.path
tokenizer: '/nsm/import/%{import.id}/evtx/%{import.file}'
target_prefix: '' target_prefix: ''
- script:
lang: javascript
source: |
function process(event) {
var pl = event.Get("import.file").slice(0,-4);
event.Put("@metadata.pipeline", "zeek." + pl);
}
- add_fields:
target: event
fields:
category: network
module: zeek
imported: true
- add_tags:
tags: ics
when:
regexp:
import.file: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-91741240-25ef-11f0-a18d-1b26f69b8310
name: soc-sensoroni-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 91741240-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-91741240-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: soc
paths:
- /opt/so/log/sensoroni/sensoroni.log
pipeline: common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors:
- decode_json_fields: - decode_json_fields:
fields: fields:
- message - message
target: sensoroni target: ''
- drop_fields:
ignore_missing: true
fields:
- host
- add_fields:
fields:
dataset: system.security
type: logs
namespace: default
target: data_stream
- add_fields:
fields:
dataset: system.security
module: system
imported: true
target: event
- then:
- add_fields:
fields:
dataset: windows.sysmon_operational
target: data_stream
- add_fields:
fields:
dataset: windows.sysmon_operational
module: windows
imported: true
target: event
if:
equals:
winlog.channel: Microsoft-Windows-Sysmon/Operational
- then:
- add_fields:
fields:
dataset: system.application
target: data_stream
- add_fields:
fields:
dataset: system.application
target: event
if:
equals:
winlog.channel: Application
- then:
- add_fields:
fields:
dataset: system.system
target: data_stream
- add_fields:
fields:
dataset: system.system
target: event
if:
equals:
winlog.channel: System
- then:
- add_fields:
fields:
dataset: windows.powershell_operational
target: data_stream
- add_fields:
fields:
dataset: windows.powershell_operational
module: windows
target: event
if:
equals:
winlog.channel: Microsoft-Windows-PowerShell/Operational
tags:
- import
- id: logfile-redis-fc98c947-7d17-4861-a318-7ad075f6d1b0
name: redis-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: redis
version:
data_stream:
namespace: default
package_policy_id: fc98c947-7d17-4861-a318-7ad075f6d1b0
streams:
- id: logfile-redis.log-fc98c947-7d17-4861-a318-7ad075f6d1b0
data_stream:
dataset: redis.log
type: logs
exclude_files:
- .gz$
paths:
- /opt/so/log/redis/redis.log
tags:
- redis-log
exclude_lines:
- '^\s+[\-`(''.|_]'
- id: logfile-logs-3b56803d-5ade-4c93-b25e-9b37182f66b8
name: import-suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 3b56803d-5ade-4c93-b25e-9b37182f66b8
streams:
- id: logfile-log.log-3b56803d-5ade-4c93-b25e-9b37182f66b8
data_stream:
dataset: import
pipeline: suricata.common
paths:
- /nsm/import/*/suricata/eve*.json
processors:
- add_fields:
fields:
module: suricata
imported: true
category: network
target: event
- dissect:
field: log.file.path
tokenizer: '/nsm/import/%{import.id}/suricata/%{import.file}'
target_prefix: ''
- id: logfile-logs-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
name: soc-server-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: c327e1a3-1ebe-449c-a8eb-f6f35032e69d
streams:
- id: logfile-log.log-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sensoroni-server.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true process_array: true
max_depth: 2 max_depth: 2
add_error_key: true
- add_fields:
target: event
fields: fields:
- message
target: soc
- add_fields:
fields:
module: soc
dataset_temp: server
category: host category: host
target: event
- rename:
ignore_missing: true
fields:
- from: soc.fields.sourceIp
to: source.ip
- from: soc.fields.status
to: http.response.status_code
- from: soc.fields.method
to: http.request.method
- from: soc.fields.path
to: url.path
- from: soc.message
to: event.action
- from: soc.level
to: log.level
tags:
- so-soc
- id: logfile-logs-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
name: soc-sensoroni-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
streams:
- id: logfile-log.log-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/sensoroni/sensoroni.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true
max_depth: 2
fields:
- message
target: sensoroni
- add_fields:
fields:
module: soc module: soc
dataset_temp: sensoroni dataset_temp: sensoroni
category: host
target: event
- rename: - rename:
ignore_missing: true
fields: fields:
- from: sensoroni.fields.sourceIp - from: sensoroni.fields.sourceIp
to: source.ip to: source.ip
@@ -162,100 +271,141 @@ inputs:
to: event.action to: event.action
- from: sensoroni.level - from: sensoroni.level
to: log.level to: log.level
ignore_missing: true - id: logfile-logs-df0d7f2c-221f-433b-b18b-d1cf83250515
file_identity.native: null name: soc-salt-relay-logs
prospector.scanner.fingerprint.enabled: false revision: 2
- id: filestream-filestream-976e3900-25ef-11f0-a18d-1b26f69b8310 type: logfile
name: suricata-logs
revision: 3
type: filestream
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 976e3900-25ef-11f0-a18d-1b26f69b8310 package_policy_id: df0d7f2c-221f-433b-b18b-d1cf83250515
streams: streams:
- id: filestream-filestream.generic-976e3900-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-df0d7f2c-221f-433b-b18b-d1cf83250515
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/salt-relay.log
processors:
- dissect:
field: message
tokenizer: '%{soc.ts} | %{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: salt_relay
category: host
target: event
tags:
- so-soc
- id: logfile-logs-74bd2366-fe52-493c-bddc-843a017fc4d0
name: soc-auth-sync-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 74bd2366-fe52-493c-bddc-843a017fc4d0
streams:
- id: logfile-log.log-74bd2366-fe52-493c-bddc-843a017fc4d0
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sync.log
processors:
- dissect:
field: message
tokenizer: '%{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: auth_sync
category: host
target: event
tags:
- so-soc
- id: logfile-logs-d151d9bf-ff2a-4529-9520-c99244bc0253
name: suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: d151d9bf-ff2a-4529-9520-c99244bc0253
streams:
- id: logfile-log.log-d151d9bf-ff2a-4529-9520-c99244bc0253
data_stream: data_stream:
dataset: suricata dataset: suricata
pipeline: suricata.common
paths: paths:
- /nsm/suricata/eve*.json - /nsm/suricata/eve*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors: processors:
- add_fields: - add_fields:
target: event
fields: fields:
category: network
module: suricata module: suricata
file_identity.native: null category: network
prospector.scanner.fingerprint.enabled: false target: event
- id: filestream-filestream-95091fe0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-31f94d05-ae75-40ee-b9c5-0e0356eff327
name: strelka-logs name: strelka-logs
revision: 3 revision: 2
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 95091fe0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: 31f94d05-ae75-40ee-b9c5-0e0356eff327
streams: streams:
- id: filestream-filestream.generic-95091fe0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-31f94d05-ae75-40ee-b9c5-0e0356eff327
data_stream: data_stream:
dataset: strelka dataset: strelka
pipeline: strelka.file
paths: paths:
- /nsm/strelka/log/strelka.log - /nsm/strelka/log/strelka.log
pipeline: strelka.file
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors: processors:
- add_fields: - add_fields:
target: event
fields: fields:
category: file
module: strelka module: strelka
file_identity.native: null category: file
prospector.scanner.fingerprint.enabled: false target: event
- id: filestream-filestream-9f309ca0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-6197fe84-9b58-4d9b-8464-3d517f28808d
name: zeek-logs name: zeek-logs
revision: 2 revision: 1
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 9f309ca0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: 6197fe84-9b58-4d9b-8464-3d517f28808d
streams: streams:
- id: filestream-filestream.generic-9f309ca0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-6197fe84-9b58-4d9b-8464-3d517f28808d
data_stream: data_stream:
dataset: zeek dataset: zeek
paths: paths:
- /nsm/zeek/logs/current/*.log - /nsm/zeek/logs/current/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
processors: processors:
- dissect: - dissect:
tokenizer: /nsm/zeek/logs/current/%{pipeline}.log tokenizer: '/nsm/zeek/logs/current/%{pipeline}.log'
field: log.file.path field: log.file.path
trim_chars: .log trim_chars: .log
target_prefix: '' target_prefix: ''
@@ -277,17 +427,18 @@ inputs:
regexp: regexp:
pipeline: >- pipeline: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm* ^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null exclude_files:
prospector.scanner.fingerprint.enabled: false - >-
broker|capture_loss|cluster|ecat_arp_info|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout.log$
- id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60 - id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-udp-514 name: syslog-udp-514
revision: 4 revision: 3
type: udp type: udp
use_output: default use_output: default
meta: meta:
package: package:
name: udp name: udp
version: version: 1.10.0
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60 package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60
@@ -307,13 +458,13 @@ inputs:
- syslog - syslog
- id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60 - id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-tcp-514 name: syslog-tcp-514
revision: 4 revision: 3
type: tcp type: tcp
use_output: default use_output: default
meta: meta:
package: package:
name: tcp name: tcp
version: version: 1.10.0
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60 package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60
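
Across this policy the two columns differ mainly in input type (filestream vs. logfile), package name (filestream vs. log), ids, and revisions. A quick structural comparison of two rendered policy files, with file names as illustrative placeholders:

# Compare just the input ids and types between two policy renderings
diff <(grep -E '^- id:|^  type:' policy-a.yml) <(grep -E '^- id:|^  type:' policy-b.yml)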

View File

@@ -11,7 +11,6 @@
include: include:
- elasticfleet.artifact_registry - elasticfleet.artifact_registry
- elasticfleet.ssl
# Add EA Group # Add EA Group
elasticfleetgroup: elasticfleetgroup:
@@ -96,9 +95,6 @@ soresourcesrepoclone:
- rev: 'main' - rev: 'main'
- depth: 1 - depth: 1
- force_reset: True - force_reset: True
- retry:
attempts: 3
interval: 10
{% endif %} {% endif %}
elasticdefendconfdir: elasticdefendconfdir:

View File

@@ -13,10 +13,9 @@
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %} {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
include: include:
- ca
- logstash.ssl
- elasticfleet.config - elasticfleet.config
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl
{% if grains.role not in ['so-fleet'] %} {% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -134,11 +133,6 @@ so-elastic-fleet:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt
- require:
- file: trusttheca
- x509: etc_elasticfleet_key - x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt - x509: etc_elasticfleet_crt
{% endif %} {% endif %}

View File

@@ -2,7 +2,7 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-zeek-logs", "name": "import-zeek-logs",
@@ -10,31 +10,19 @@
"description": "Zeek Import logs", "description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/zeek/logs/*.log" "/nsm/import/*/zeek/logs/*.log"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -11,47 +11,31 @@
{%- endif -%} {%- endif -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/kratos/kratos.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "kratos", "data_stream.dataset": "kratos",
"pipeline": "kratos", "tags": ["so-kratos"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
{%- if valid_identities -%} {%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%} {%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%} {%- endif -%}
"tags": [ "custom": "pipeline: kratos"
"so-kratos"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -59,3 +43,4 @@
}, },
"force": true "force": true
} }
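
These JSON files are shaped like Fleet package-policy requests, so a template like the one above could be posted to Kibana's Fleet API for testing; a hedged sketch, with host and credentials as placeholders:

curl -sk -u "$USER:$PASS" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -X POST 'https://localhost:5601/api/fleet/package_policies' -d @kratos-logs.json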

View File

@@ -2,38 +2,28 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"id": "zeek-logs",
"name": "zeek-logs", "name": "zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/zeek/logs/current/*.log" "/nsm/zeek/logs/current/*.log"
], ],
"data_stream.dataset": "zeek", "data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "9.0.2", "version": "8.18.1",
"requires_root": true "requires_root": true
}, },
"enabled": true, "enabled": true,

View File

@@ -1,43 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs", "description": "Hydra logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/hydra/hydra.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "hydra",
"pipeline": "hydra", "tags": ["so-hydra"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"tags": [ "custom": "pipeline: hydra"
"so-hydra"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -45,5 +28,3 @@
}, },
"force": true "force": true
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "idh-logs", "name": "idh-logs",
"namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/idh/opencanary.log" "/nsm/idh/opencanary.log"
], ],
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,42 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/evtx/*.json" "/nsm/import/*/evtx/*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "custom": "",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - 
add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
], ]
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,41 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/suricata/eve*.json" "/nsm/import/*/suricata/eve*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"ignore_older": "72h", "custom": "pipeline: suricata.common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -15,7 +15,7 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/redis/redis-server.log" "/opt/so/log/redis/redis.log"
], ],
"tags": [ "tags": [
"redis-log" "redis-log"

View File

@@ -1,17 +1,18 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "rita-logs", "name": "rita-logs",
"namespace": "so",
"description": "RITA Logs", "description": "RITA Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
@@ -19,28 +20,15 @@
"/nsm/rita/exploded-dns.csv", "/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv" "/nsm/rita/long-connections.csv"
], ],
"data_stream.dataset": "rita", "exclude_files": [],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h", "ignore_older": "72h",
"clean_inactive": -1, "data_stream.dataset": "rita",
"harvester_limit": 0, "tags": [],
"fingerprint": false, "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"fingerprint_offset": 0, "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
"fingerprint_length": "64", }
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
},
"force": true
} }
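
Note: the embedded JavaScript processor above derives the ingest pipeline from the CSV filename: `exploded-dns.csv` routes to `rita.dns`, `long-connections.csv` to `rita.connections`, and so on. Roughly the same split, sketched in shell for the single-hyphen names used here:

    for f in exploded-dns long-connections; do
      pl=${f#*-}            # drop everything up to the first hyphen, if any
      echo "rita.${pl}"     # -> rita.dns, rita.connections
    done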

View File

@@ -1,41 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "so-ip-mappings", "name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings", "description": "IP Description mappings",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/custom-mappings/ip-descriptions.csv" "/nsm/custom-mappings/ip-descriptions.csv"
], ],
"data_stream.dataset": "hostnamemappings", "data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"tags": [ "tags": [
"so-ip-mappings" "so-ip-mappings"
], ],
"recursive_glob": true, "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"clean_inactive": -1, "custom": ""
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -43,3 +31,5 @@
}, },
"force": true "force": true
} }
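
Note: the script processor above cancels any event whose first CSV column is not a valid IPv4 address. The same regex can be smoke-tested from a shell (a sketch; `\d` is rewritten as `[0-9]` for POSIX grep):

    re='^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])$'
    echo "10.1.2.3"   | grep -Eq "$re" && echo pass
    echo "10.1.2.300" | grep -Eq "$re" || echo reject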

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sync.log" "/opt/so/log/soc/sync.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync", "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"tags": [], "custom": "pipeline: common"
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,44 +1,31 @@
{ {
"policy_id": "so-grid-nodes_general",
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-detections-logs", "name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs", "description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log" "/opt/so/log/soc/detections_runtime-status_yara.log"
], ],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "tags": [
"so-soc" "so-soc"
], ],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"ignore_older": "72h", "custom": "pipeline: common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/salt-relay.log" "/opt/so/log/soc/salt-relay.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay", "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/sensoroni/sensoroni.log" "/opt/so/log/sensoroni/sensoroni.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sensoroni-server.log" "/opt/so/log/soc/sensoroni-server.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "strelka-logs", "name": "strelka-logs",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/strelka/log/strelka.log" "/nsm/strelka/log/strelka.log"
], ],
"data_stream.dataset": "strelka", "data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"clean_inactive": -1, "custom": "pipeline: strelka.file"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/suricata/eve*.json" "/nsm/suricata/eve*.json"
], ],
"data_stream.dataset": "filestream.generic", "data_stream.dataset": "suricata",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"clean_inactive": -1, "custom": "pipeline: suricata.common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }

View File

@@ -1,107 +0,0 @@
{
"package": {
"name": "elasticsearch",
"version": ""
},
"name": "elasticsearch-grid-nodes_heavy",
"namespace": "default",
"description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes_heavy",
"inputs": {
"elasticsearch-logfile": {
"enabled": true,
"streams": {
"elasticsearch.audit": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_audit.json"
]
}
},
"elasticsearch.deprecation": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_deprecation.json"
]
}
},
"elasticsearch.gc": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/gc.log.[0-9]*",
"/var/log/elasticsearch/gc.log"
]
}
},
"elasticsearch.server": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/elasticsearch/*.json"
]
}
},
"elasticsearch.slowlog": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_index_search_slowlog.json",
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
]
}
}
}
},
"elasticsearch-elasticsearch/metrics": {
"enabled": false,
"vars": {
"hosts": [
"http://localhost:9200"
],
"scope": "node"
},
"streams": {
"elasticsearch.stack_monitoring.ccr": {
"enabled": false
},
"elasticsearch.stack_monitoring.cluster_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.enrich": {
"enabled": false
},
"elasticsearch.stack_monitoring.index": {
"enabled": false
},
"elasticsearch.stack_monitoring.index_recovery": {
"enabled": false,
"vars": {
"active.only": true
}
},
"elasticsearch.stack_monitoring.index_summary": {
"enabled": false
},
"elasticsearch.stack_monitoring.ml_job": {
"enabled": false
},
"elasticsearch.stack_monitoring.node": {
"enabled": false
},
"elasticsearch.stack_monitoring.node_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.pending_tasks": {
"enabled": false
},
"elasticsearch.stack_monitoring.shard": {
"enabled": false
}
}
}
},
"force": true
}

View File

@@ -8,9 +8,7 @@
{% endif %} {% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %} {% if not AGENT_STATUS %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}
pull_agent_installer: pull_agent_installer:
file.managed: file.managed:
@@ -21,7 +19,7 @@ pull_agent_installer:
run_installer: run_installer:
cmd.run: cmd.run:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
- cwd: /opt/so - cwd: /opt/so
- retry: - retry:
attempts: 3 attempts: 3
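
Note: one side of this hunk gates the installer on both the service being registered and the agent binary existing on disk. In plain shell, that idempotency guard looks roughly like this (a sketch, not the rendered Salt state):

    if ! systemctl list-unit-files --type=service | grep -q '^elastic-agent\.service' \
        || [ ! -f /opt/Elastic/Agent/elastic-agent ]; then
      cd /opt/so && ./so-elastic-agent_linux_amd64 -token="$GRIDNODETOKEN" -force
    fi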

View File

@@ -21,7 +21,6 @@
'azure_application_insights.app_state': 'azure.app_state', 'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing', 'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function', 'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset', 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm', 'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance', 'azure_metrics.container_instance': 'azure.container_instance',

View File

@@ -1,186 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_agent_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetagentkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ CA.server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 947
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 947
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
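
Note: this removed state converted the agent key to PKCS#8 whenever the key was renewed. The conversion plus a quick sanity check can be reproduced manually with the same paths (a sketch):

    /usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 \
      -out /etc/pki/elasticfleet-agent.p8 -nocrypt
    openssl pkey -in /etc/pki/elasticfleet-agent.p8 -noout \
      && echo "PKCS#8 key parses cleanly"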

View File

@@ -17,9 +17,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# Third, configure Elastic Defend Integration separately # Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints # Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n" printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
@@ -40,7 +40,8 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
done done
# Grid Nodes - General # Grid Nodes - General
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json
do
printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n" printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
@@ -59,9 +60,13 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi fi
fi fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Grid Nodes - Heavy # Grid Nodes - Heavy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json
do
printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n" printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
@@ -73,16 +78,22 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}" echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1 RETURN_CODE=1
continue continue
fi fi
fi fi
fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Fleet Server - Optional integrations # Fleet Server - Optional integrations
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7` FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n" printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
@@ -106,8 +117,6 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi fi
fi fi
done done
# Only create the state file if all policies were created/updated successfully
if [[ "$RETURN_CODE" != "1" ]]; then if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt touch /opt/so/state/eaintegrations.txt
fi fi
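
Note: in both variants the state file is only touched when RETURN_CODE was never set to 1, so a failed policy load forces the script to re-run on the next pass. Condensed to its skeleton (a sketch):

    RETURN_CODE=0
    for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json; do
      elastic_fleet_integration_create "@$INTEGRATION" || RETURN_CODE=1
    done
    # only mark the run complete if nothing above failed
    [ "$RETURN_CODE" != "1" ] && touch /opt/so/state/eaintegrations.txt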

View File

@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
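
Note: the retry-hardened form of this call combines `--retry`, `--fail`, and a bail-out when nothing usable comes back. The general pattern (a sketch; $FLEET_API_URL is a placeholder):

    RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "$FLEET_API_URL" \
      --retry 3 --retry-delay 30 --fail 2>/dev/null)
    if [ -z "$RAW_JSON" ]; then
      echo "No usable response from Fleet API - bailing"
      exit 0
    fi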

View File

@@ -26,7 +26,7 @@ function update_es_urls() {
} }
# Get current list of Fleet Elasticsearch URLs # Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list) latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do while read -r package; do
# get package details # get package details
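
Note: both forms of this jq filter build the same package list; only the source of the installed version differs between Kibana releases (`.installationInfo.version` on one side, `.savedObject.attributes.install_version` on the other). A minimal sketch of the newer field path:

    echo "$latest_package_list" | jq '{packages: [.items[]
      | {name, latest_version: .version, installed_version: .installationInfo.version}]}'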

View File

@@ -142,7 +142,7 @@ function update_kafka_outputs() {
{% if GLOBALS.pipeline == "KAFKA" %} {% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs # Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -168,7 +168,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %} {% else %}
# Get current list of Logstash Outputs # Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -241,11 +241,9 @@ printf '%s\n'\
"" >> "$global_pillar_file" "" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State # Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node # Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0 exit 0

View File

@@ -23,7 +23,7 @@ function update_fleet_urls() {
} }
# Get current list of Fleet Server URLs # Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -34,11 +34,6 @@ if [[ "$RETURN_CODE" != "0" ]]; then
exit 1 exit 1
fi fi
if [[ ! -f /etc/pki/elasticfleet-kafka.crt || ! -f /etc/pki/elasticfleet-kafka.key ]]; then
echo -e "\nKafka certificates not found, can't set up Elastic Fleet output policy for Kafka...\n"
exit 1
fi
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
@@ -52,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg KAFKACA "$KAFKACA" \ --arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -72,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\ --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \ --argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
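
Note: the Kafka output payload changes shape between Fleet API schema versions: a single `topic` string on one side of this hunk, a `topics` array of objects on the other. Sketched with jq for comparison:

    jq -n '{topic: "default-securityonion"}'              # single-string schema
    jq -n '{topics: [{topic: "default-securityonion"}]}'  # array-of-objects schema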

View File

@@ -26,14 +26,14 @@ catrustscript:
GLOBALS: {{ GLOBALS }} GLOBALS: {{ GLOBALS }}
{% endif %} {% endif %}
elasticsearch_cacerts: cacertz:
file.managed: file.managed:
- name: /opt/so/conf/ca/cacerts - name: /opt/so/conf/ca/cacerts
- source: salt://elasticsearch/cacerts - source: salt://elasticsearch/cacerts
- user: 939 - user: 939
- group: 939 - group: 939
elasticsearch_capems: capemz:
file.managed: file.managed:
- name: /opt/so/conf/ca/tls-ca-bundle.pem - name: /opt/so/conf/ca/tls-ca-bundle.pem
- source: salt://elasticsearch/tls-ca-bundle.pem - source: salt://elasticsearch/tls-ca-bundle.pem

View File

@@ -5,6 +5,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
include:
- ssl
- elasticsearch.ca
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

View File

@@ -1,13 +1,11 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 9.0.8 version: 8.18.8
index_clean: true index_clean: true
config: config:
action: action:
destructive_requires_name: true destructive_requires_name: true
cluster: cluster:
logsdb:
enabled: false
routing: routing:
allocation: allocation:
disk: disk:
@@ -693,6 +691,7 @@ elasticsearch:
match_mapping_type: string match_mapping_type: string
settings: settings:
index: index:
final_pipeline: .fleet_final_pipeline-1
lifecycle: lifecycle:
name: so-import-logs name: so-import-logs
mapping: mapping:
@@ -858,11 +857,53 @@ elasticsearch:
composed_of: composed_of:
- agent-mappings - agent-mappings
- dtc-agent-mappings - dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- container-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings
- dtc-event-mappings
- file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings - host-mappings
- dtc-host-mappings - dtc-host-mappings
- http-mappings - http-mappings
- dtc-http-mappings - dtc-http-mappings
- log-mappings
- metadata-mappings - metadata-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- threat-mappings
- tls-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
data_stream: data_stream:

View File

@@ -14,9 +14,6 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include: include:
- ca
- elasticsearch.ca
- elasticsearch.ssl
- elasticsearch.config - elasticsearch.config
- elasticsearch.sostatus - elasticsearch.sostatus
@@ -64,7 +61,11 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
@@ -81,21 +82,22 @@ so-elasticsearch:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca - file: cacertz
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- require: - require:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- file: eslog4jfile - file: eslog4jfile
- file: nsmesdir - file: nsmesdir
- file: eslogdir - file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms - file: elasticp12perms
{% if GLOBALS.is_manager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- cmd: auth_users_roles_inode - cmd: auth_users_roles_inode
- cmd: auth_users_inode - cmd: auth_users_inode
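
Note: because the watch/require graph for so-elasticsearch changes here, a dry run is a cheap way to preview which certs, mounts, and files would be touched before a real highstate (a sketch):

    salt-call state.apply elasticsearch queue=True test=True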

View File

@@ -6,207 +6,26 @@
}, },
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n", "description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [ "processors": [
{ { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
"set": { { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
"ignore_failure": true, { "split": { "if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
"field": "event.module", { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
"value": "elastic_agent" { "set": { "if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'", "field":"event.module", "value":"{{ datastream_dataset_temp.0 }}", "ignore_failure":true, "ignore_empty_value":true, "description":"Fix EA network packet capture" } },
} { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
}, { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}", "allow_duplicates": false } },
{ { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
"split": { { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
"field": "event.dataset", { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
"separator": "\\.", { "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
"target_field": "module_temp" { "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
} { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
}, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
"split": { { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
"if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", { "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
"field": "data_stream.dataset", {"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
"separator": "\\.", {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
"target_field": "datastream_dataset_temp", { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
"ignore_missing": true
}
},
{
"set": {
"if": "ctx.module_temp != null",
"override": true,
"field": "event.module",
"value": "{{module_temp.0}}"
}
},
{
"set": {
"if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'",
"field": "event.module",
"value": "{{ datastream_dataset_temp.0 }}",
"ignore_failure": true,
"ignore_empty_value": true,
"description": "Fix EA network packet capture"
}
},
{
"gsub": {
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')",
"field": "event.dataset",
"pattern": "^[^.]*.",
"replacement": "",
"target_field": "dataset_tag_temp"
}
},
{
"append": {
"if": "ctx.dataset_tag_temp != null",
"field": "tags",
"value": "{{dataset_tag_temp}}",
"allow_duplicates": false
}
},
{
"set": {
"if": "ctx.network?.direction == 'egress'",
"override": true,
"field": "network.initiated",
"value": "true"
}
},
{
"set": {
"if": "ctx.network?.direction == 'ingress'",
"override": true,
"field": "network.initiated",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv4'",
"override": true,
"field": "destination.ipv6",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv6'",
"override": true,
"field": "destination.ipv6",
"value": "true"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.dataset",
"value": "import"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.namespace",
"value": "so"
}
},
{
"community_id": {
"if": "ctx.event?.dataset == 'endpoint.events.network'",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.module == 'fim'",
"override": true,
"field": "event.module",
"value": "file_integrity"
}
},
{
"rename": {
"if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'",
"ignore_missing": true,
"field": "winlog.event_data.Threat Name",
"target_field": "winlog.event_data.threat_name"
}
},
{
"set": {
"if": "ctx?.metadata?.kafka != null",
"field": "kafka.id",
"value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'",
"field": "event.module",
"value": "elasticsearch"
}
},
{
"append": {
"field": "related.ip",
"value": [
"{{source.ip}}",
"{{destination.ip}}"
],
"allow_duplicates": false,
"if": "ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null",
"ignore_failure": true
}
},
{
"foreach": {
"field": "host.ip",
"processor": {
"append": {
"field": "related.ip",
"value": "{{_ingest._value}}",
"allow_duplicates": false
}
},
"if": "ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null",
"ignore_missing": true,
"description": "Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"
}
},
{
"pipeline": {
"name": ".fleet_final_pipeline-1",
"ignore_missing_pipeline": true
}
},
{
"remove": {
"field": "event.agent_id_status",
"ignore_missing": true,
"if": "ctx?.event?.agent_id_status == 'auth_metadata_missing'"
}
},
{
"remove": {
"field": [
"message2",
"type",
"fields",
"category",
"module",
"dataset",
"event.dataset_temp",
"dataset_tag_temp",
"module_temp",
"datastream_dataset_temp"
],
"ignore_missing": true,
"ignore_failure": true
}
}
] ]
} }
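
Note: whether the pipeline definition is stored compact or pretty-printed, it behaves the same; the _simulate API is the quickest way to confirm a reformatted copy still routes fields as expected (a sketch; PIPELINE_NAME and the sample document are placeholders):

    curl -sK /opt/so/conf/elasticsearch/curl.config -X POST \
      "localhost:9200/_ingest/pipeline/PIPELINE_NAME/_simulate" \
      -H 'Content-Type: application/json' \
      -d '{"docs":[{"_source":{"event":{"dataset":"suricata.alert"}}}]}'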

View File

@@ -1,90 +1,9 @@
{ {
"description" : "kratos", "description" : "kratos",
"processors" : [ "processors" : [
{ {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
"set": { {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
"field": "audience", {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
"value": "access", { "pipeline": { "name": "common" } }
"override": false,
"ignore_failure": true
}
},
{
"set": {
"field": "event.dataset",
"ignore_empty_value": true,
"ignore_failure": true,
"value": "kratos.{{{audience}}}",
"media_type": "text/plain"
}
},
{
"set": {
"field": "event.action",
"ignore_failure": true,
"copy_from": "msg"
}
},
{
"rename": {
"field": "http_request",
"target_field": "http.request",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http_response",
"target_field": "http.response",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.path",
"target_field": "http.uri",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.query",
"target_field": "http.query",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.headers.user-agent",
"target_field": "http.useragent",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "common"
}
}
] ]
} }

View File

@@ -27,13 +27,6 @@ elasticsearch:
readonly: True readonly: True
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
logsdb:
enabled:
description: Enables or disables the Elasticsearch logsdb index mode. When enabled, most logs-* datastreams will convert to logsdb from standard after rolling over.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
routing: routing:
allocation: allocation:
disk: disk:

View File

@@ -1,66 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
# Create a cert for elasticsearch
elasticsearch_key:
x509.private_key_managed:
- name: /etc/pki/elasticsearch.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticsearch_crt:
x509.certificate_managed:
- name: /etc/pki/elasticsearch.crt
- ca_server: {{ CA.server }}
- signing_policy: registry
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
- x509: /etc/pki/elasticsearch.key
elastickeyperms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
elasticp12perms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.p12
- mode: 640
- group: 930
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -2,7 +2,7 @@
"template": { "template": {
"settings": { "settings": {
"index": { "index": {
"final_pipeline": "global@custom" "final_pipeline": ".fleet_final_pipeline-1"
} }
}, },
"mappings": { "mappings": {

View File

@@ -14,9 +14,8 @@ set -e
# Check to see if we have extracted the ca cert. # Check to see if we have extracted the ca cert.
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
# Make sure symbolic links are followed when copying from container docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker rm so-elasticsearchca docker rm so-elasticsearchca
echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -121,7 +121,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "Loading Security Onion index templates..." echo "Loading Security Onion index templates..."
shopt -s extglob shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %} {% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)" pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %} {% else %}
pattern="*" pattern="*"
{% endif %} {% endif %}
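
The heavynode branch relies on bash's extglob negation: !(a|b|c) matches any name except those containing the listed substrings. A standalone illustration of the mechanism, with hypothetical file names:

    shopt -s extglob
    # With aws.json, zeek.json, and suricata.json present, this loop skips aws.json
    for f in !(*aws*); do
      echo "would load template: $f"
    done
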

View File

@@ -9,6 +9,7 @@
include: include:
- salt.minion - salt.minion
- ssl
# Influx DB # Influx DB
influxconfdir: influxconfdir:

View File

@@ -11,7 +11,6 @@
{% set TOKEN = salt['pillar.get']('influxdb:token') %} {% set TOKEN = salt['pillar.get']('influxdb:token') %}
include: include:
- influxdb.ssl
- influxdb.config - influxdb.config
- influxdb.sostatus - influxdb.sostatus
@@ -60,8 +59,6 @@ so-influxdb:
{% endif %} {% endif %}
- watch: - watch:
- file: influxdbconf - file: influxdbconf
- x509: influxdb_key
- x509: influxdb_crt
- require: - require:
- file: influxdbconf - file: influxdbconf
- x509: influxdb_key - x509: influxdb_key

View File

@@ -1,55 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
influxdb_key:
x509.private_key_managed:
- name: /etc/pki/influxdb.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/influxdb.key') -%}
- prereq:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Create a cert for talking to influxdb
influxdb_crt:
x509.certificate_managed:
- name: /etc/pki/influxdb.crt
- ca_server: {{ CA.server }}
- signing_policy: influxdb
- private_key: /etc/pki/influxdb.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
influxkeyperms:
file.managed:
- replace: False
- name: /etc/pki/influxdb.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
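
The days_remaining: 7 knob in states like this one means the cert is reissued once it is within a week of expiry. The same window can be checked manually; a sketch, assuming the path from the removed state:

    # Exit status 1 means the cert expires within the next 7 days
    openssl x509 -in /etc/pki/influxdb.crt -noout -checkend $((7 * 86400))
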

View File

@@ -68,8 +68,6 @@ so-kafka:
- file: kafka_server_jaas_properties - file: kafka_server_jaas_properties
{% endif %} {% endif %}
- file: kafkacertz - file: kafkacertz
- x509: kafka_crt
- file: kafka_pkcs12_perms
- require: - require:
- file: kafkacertz - file: kafkacertz

View File

@@ -6,11 +6,20 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} {% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
{% set kafka_password = salt['pillar.get']('kafka:config:password') %} {% set kafka_password = salt['pillar.get']('kafka:config:password') %}
include: include:
- ca - ca.dirs
{% set global_ca_server = [] %}
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set ca_server = global_ca_server[0] %}
{% if GLOBALS.pipeline == "KAFKA" %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
kafka_client_key: kafka_client_key:
@@ -30,12 +39,12 @@ kafka_client_key:
kafka_client_crt: kafka_client_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka-client.crt - name: /etc/pki/kafka-client.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka-client.key - private_key: /etc/pki/kafka-client.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -78,12 +87,12 @@ kafka_key:
kafka_crt: kafka_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka.crt - name: /etc/pki/kafka.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka.key - private_key: /etc/pki/kafka.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -94,7 +103,6 @@ kafka_crt:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}" - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
- onchanges: - onchanges:
- x509: /etc/pki/kafka.key - x509: /etc/pki/kafka.key
kafka_key_perms: kafka_key_perms:
file.managed: file.managed:
- replace: False - replace: False
@@ -140,12 +148,12 @@ kafka_logstash_key:
kafka_logstash_crt: kafka_logstash_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/kafka-logstash.crt - name: /etc/pki/kafka-logstash.crt
- ca_server: {{ CA.server }} - ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka - signing_policy: kafka
- private_key: /etc/pki/kafka-logstash.key - private_key: /etc/pki/kafka-logstash.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- days_remaining: 7 - days_remaining: 0
- days_valid: 820 - days_valid: 820
- backup: True - backup: True
- timeout: 30 - timeout: 30
@@ -182,6 +190,7 @@ kafka_logstash_pkcs12_perms:
- group: 939 - group: 939
{% endif %} {% endif %}
{% endif %}
{% else %} {% else %}
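
The replacement logic above discovers the CA host by querying the salt mine for x509 entries published by manager-class minions. The same lookup can be reproduced interactively; a sketch, assuming it runs on a minion with mine access and that the manager minion ID starts with "manager":

    # Keys of the returned map are the minion IDs exporting x509.get_pem_entries
    salt-call mine.get 'manager*' x509.get_pem_entries --out=json
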

View File

@@ -25,10 +25,11 @@ kibana:
discardCorruptObjects: "8.18.8" discardCorruptObjects: "8.18.8"
telemetry: telemetry:
enabled: False enabled: False
security:
showInsecureClusterWarning: False
xpack: xpack:
security: security:
secureCookies: true secureCookies: true
showInsecureClusterWarning: false
reporting: reporting:
kibanaServer: kibanaServer:
hostname: localhost hostname: localhost

View File

@@ -75,7 +75,6 @@ kratosconfig:
- group: 928 - group: 928
- mode: 600 - mode: 600
- template: jinja - template: jinja
- show_changes: False
- defaults: - defaults:
KRATOSMERGED: {{ KRATOSMERGED }} KRATOSMERGED: {{ KRATOSMERGED }}

View File

@@ -46,7 +46,6 @@ kratos:
ui_url: https://URL_BASE/ ui_url: https://URL_BASE/
login: login:
ui_url: https://URL_BASE/login/ ui_url: https://URL_BASE/login/
lifespan: 60m
error: error:
ui_url: https://URL_BASE/login/ ui_url: https://URL_BASE/login/
registration: registration:

View File

@@ -182,10 +182,6 @@ kratos:
global: True global: True
advanced: True advanced: True
helpLink: kratos.html helpLink: kratos.html
lifespan:
description: Defines the duration that a login form will remain valid.
global: True
helpLink: kratos.html
error: error:
ui_url: ui_url:
description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation. description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation.

View File

@@ -10,8 +10,9 @@
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %} {% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} {% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
include: include:
- ssl
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch - elasticsearch
{% endif %} {% endif %}

View File

@@ -63,7 +63,7 @@ logstash:
settings: settings:
lsheap: 500m lsheap: 500m
config: config:
api_x_http_x_host: 0.0.0.0 http_x_host: 0.0.0.0
path_x_logs: /var/log/logstash path_x_logs: /var/log/logstash
pipeline_x_workers: 1 pipeline_x_workers: 1
pipeline_x_batch_x_size: 125 pipeline_x_batch_x_size: 125

View File

@@ -12,7 +12,6 @@
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %} {% set lsheap = LOGSTASH_MERGED.settings.lsheap %}
include: include:
- ca
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %} {% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch.ca - elasticsearch.ca
{% endif %} {% endif %}
@@ -21,9 +20,9 @@ include:
- kafka.ca - kafka.ca
- kafka.ssl - kafka.ssl
{% endif %} {% endif %}
- logstash.ssl
- logstash.config - logstash.config
- logstash.sostatus - logstash.sostatus
- ssl
so-logstash: so-logstash:
docker_container.running: docker_container.running:
@@ -66,18 +65,22 @@ so-logstash:
- /opt/so/log/logstash:/var/log/logstash:rw - /opt/so/log/logstash:/var/log/logstash:rw
- /sys/fs/cgroup:/sys/fs/cgroup:ro - /sys/fs/cgroup:/sys/fs/cgroup:ro
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %} {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro - /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro - /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro - /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro - /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% if GLOBALS.role != 'so-fleet' %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %} {% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %} {% endif %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %} {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %} {% endif %}
@@ -97,22 +100,11 @@ so-logstash:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: lsetcsync {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
- file: trusttheca
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleet_logstash_key - x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt - x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
- x509: etc_filebeat_crt
- file: logstash_filebeat_p8
{% endif %}
{% endif %} {% endif %}
- file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} {% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
- file: ls_pipeline_{{assigned_pipeline}} - file: ls_pipeline_{{assigned_pipeline}}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %} {% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
@@ -123,20 +115,17 @@ so-logstash:
- file: kafkacertz - file: kafkacertz
{% endif %} {% endif %}
- require: - require:
- file: trusttheca {% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
{% if GLOBALS.is_manager %}
- file: elasticsearch_cacerts
- file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_crt
- x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleetlumberjack_crt
- x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
- x509: etc_filebeat_crt - x509: etc_filebeat_crt
- file: logstash_filebeat_p8
{% endif %} {% endif %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
- file: cacertz
- file: capemz
{% endif %} {% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
- file: kafkacertz - file: kafkacertz

View File

@@ -5,10 +5,10 @@ input {
codec => es_bulk codec => es_bulk
request_headers_target_field => client_headers request_headers_target_field => client_headers
remote_host_target_field => client_host remote_host_target_field => client_host
ssl_enabled => true ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt" ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key" ssl_key => "/usr/share/logstash/filebeat.key"
ssl_client_authentication => "required" ssl_verify_mode => "peer"
} }
} }
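
Since the input now demands a client certificate (ssl_client_authentication => "required"), a handshake without one should be rejected. A quick probe, assuming the filebeat cert pair is readable and noting that the port shown is illustrative, as it does not appear in this hunk:

    # With -cert/-key the handshake should complete; omit them and it should fail
    openssl s_client -connect localhost:5044 \
      -cert /etc/pki/filebeat.crt -key /etc/pki/filebeat.key \
      -CAfile /etc/pki/tls/certs/intca.crt </dev/null
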

View File

@@ -2,11 +2,11 @@ input {
elastic_agent { elastic_agent {
port => 5055 port => 5055
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ] tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
ssl_enabled => true ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt" ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
ssl_key => "/usr/share/logstash/elasticfleet-logstash.key" ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
ssl_client_authentication => "required" ssl_verify_mode => "force_peer"
ecs_compatibility => v8 ecs_compatibility => v8
} }
} }

View File

@@ -2,7 +2,7 @@ input {
elastic_agent { elastic_agent {
port => 5056 port => 5056
tags => [ "elastic-agent", "fleet-lumberjack-input" ] tags => [ "elastic-agent", "fleet-lumberjack-input" ]
ssl_enabled => true ssl => true
ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt" ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key" ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
ecs_compatibility => v8 ecs_compatibility => v8

View File

@@ -8,8 +8,8 @@ output {
document_id => "%{[metadata][_id]}" document_id => "%{[metadata][_id]}"
index => "so-ip-mappings" index => "so-ip-mappings"
silence_errors_in_log => ["version_conflict_engine_exception"] silence_errors_in_log => ["version_conflict_engine_exception"]
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
else { else {
@@ -25,8 +25,8 @@ output {
document_id => "%{[metadata][_id]}" document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}" pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"] silence_errors_in_log => ["version_conflict_engine_exception"]
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
else { else {
@@ -37,8 +37,8 @@ output {
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}" pipeline => "%{[metadata][pipeline]}"
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
} }
@@ -49,8 +49,8 @@ output {
data_stream => true data_stream => true
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
ssl_enabled => true ssl => true
ssl_verification_mode=> "none" ssl_certificate_verification => false
} }
} }
} }
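
ssl_verification_mode => "none" tells these outputs to encrypt the connection but skip peer verification, the same trust model as curl's -k. A sketch of the equivalent manual check, assuming Elasticsearch is reachable on the local manager and the credential variables are illustrative:

    # -k mirrors the disabled certificate verification in the outputs above
    curl -sk -u "$ES_USER:$ES_PASS" "https://localhost:9200/_cluster/health?pretty"
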

View File

@@ -13,8 +13,8 @@ output {
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
index => "endgame-%{+YYYY.MM.dd}" index => "endgame-%{+YYYY.MM.dd}"
ssl_enabled => true ssl => true
ssl_verification_mode => "none" ssl_certificate_verification => false
} }
} }
} }

View File

@@ -56,7 +56,7 @@ logstash:
helpLink: logstash.html helpLink: logstash.html
global: False global: False
config: config:
api_x_http_x_host: http_x_host:
description: Host interface to listen to connections. description: Host interface to listen to connections.
helpLink: logstash.html helpLink: logstash.html
readonly: True readonly: True

View File

@@ -1,287 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states or sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode'] %}
# Start -- Elastic Fleet Logstash Input Cert
etc_elasticfleet_logstash_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-logstash.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-logstash.key') -%}
- prereq:
- x509: etc_elasticfleet_logstash_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-logstash.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-logstash.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_logstash_key
eflogstashperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- group: 939
chownelasticfleetlogstashcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.crt
- mode: 640
- user: 931
- group: 939
chownelasticfleetlogstashkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Logstash Input Cert
{% endif %} # endif is for not including HeavyNodes
# Start -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Cert needed on: Managers, Receivers
etc_elasticfleetlumberjack_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-lumberjack.key
- bits: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-lumberjack.key') -%}
- prereq:
- x509: etc_elasticfleetlumberjack_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleetlumberjack_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-lumberjack.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-lumberjack.key
- CN: {{ GLOBALS.node_ip }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-lumberjack.key -topk8 -out /etc/pki/elasticfleet-lumberjack.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleetlumberjack_key
eflogstashlumberjackperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- group: 939
chownilogstashelasticfleetlumberjackp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.p8
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.crt
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Node - Logstash Lumberjack Input / Output
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
x509.private_key_managed:
- name: /etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
- prereq:
- x509: etc_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_filebeat_crt:
x509.certificate_managed:
- name: /etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
- x509: etc_filebeat_key
fbperms:
file.managed:
- replace: False
- name: /etc/pki/filebeat.key
- mode: 640
- group: 939
logstash_filebeat_p8:
file.managed:
- replace: False
- name: /etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% if grains.role not in ['so-heavynode', 'so-receiver'] %}
# Create symlinks to the keys so they can be distributed to all the things
filebeatdir:
file.directory:
- name: /opt/so/saltstack/local/salt/filebeat/files
- makedirs: True
fbkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
- target: /etc/pki/filebeat.p8
- user: socore
- group: socore
fbcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
- target: /etc/pki/filebeat.crt
- user: socore
- group: socore
{% endif %}
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}
fbcertdir:
file.directory:
- name: /opt/so/conf/filebeat/etc/pki
- makedirs: True
conf_filebeat_key:
x509.private_key_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
- prereq:
- x509: conf_filebeat_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
conf_filebeat_crt:
x509.certificate_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- ca_server: {{ CA.server }}
- signing_policy: filebeat
- private_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:"
- onchanges:
- x509: conf_filebeat_key
filebeatkeyperms:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- mode: 640
- group: 939
chownfilebeatp8:
file.managed:
- replace: False
- name: /opt/so/conf/filebeat/etc/pki/filebeat.p8
- mode: 640
- user: 931
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
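
Each block in the removed state pairs an x509 cert with a PKCS#8 copy of its key for logstash. Whether a given pair still matches can be confirmed by comparing public keys; a sketch using the filebeat pair as an example:

    # Identical output from both commands means the cert and PKCS#8 key belong together
    diff <(openssl x509 -in /etc/pki/filebeat.crt -noout -pubkey) \
         <(openssl pkey -in /etc/pki/filebeat.p8 -pubout)
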

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
elastic_curl_config_distributed: elastic_curl_config_distributed:
file.managed: file.managed:
- name: /opt/so/saltstack/local/salt/elasticsearch/curl.config - name: /opt/so/saltstack/local/salt/elasticsearch/curl.config

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
kibana_curl_config_distributed: kibana_curl_config_distributed:
file.managed: file.managed:
- name: /opt/so/conf/kibana/curl.config - name: /opt/so/conf/kibana/curl.config

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
include: include:
- elasticsearch.auth - elasticsearch.auth
- kratos - kratos

View File

@@ -716,18 +716,6 @@ function checkMine() {
} }
} }
function create_ca_pillar() {
local capillar=/opt/so/saltstack/local/pillar/ca/init.sls
printf '%s\n'\
"ca:"\
" server: $MINION_ID"\
" " > $capillar
if [ $? -ne 0 ]; then
log "ERROR" "Failed to add $MINION_ID to $capillar"
return 1
fi
}
function createEVAL() { function createEVAL() {
log "INFO" "Creating EVAL configuration for minion $MINION_ID" log "INFO" "Creating EVAL configuration for minion $MINION_ID"
is_pcaplimit=true is_pcaplimit=true
@@ -839,6 +827,7 @@ function createHEAVYNODE() {
add_elastic_agent_to_minion || return 1 add_elastic_agent_to_minion || return 1
add_sensor_to_minion || return 1 add_sensor_to_minion || return 1
add_strelka_to_minion || return 1 add_strelka_to_minion || return 1
add_redis_to_minion || return 1
add_telegraf_to_minion || return 1 add_telegraf_to_minion || return 1
} }
@@ -1024,7 +1013,6 @@ function setupMinionFiles() {
managers=("EVAL" "STANDALONE" "IMPORT" "MANAGER" "MANAGERSEARCH") managers=("EVAL" "STANDALONE" "IMPORT" "MANAGER" "MANAGERSEARCH")
if echo "${managers[@]}" | grep -qw "$NODETYPE"; then if echo "${managers[@]}" | grep -qw "$NODETYPE"; then
add_sensoroni_with_analyze_to_minion || return 1 add_sensoroni_with_analyze_to_minion || return 1
create_ca_pillar || return 1
else else
add_sensoroni_to_minion || return 1 add_sensoroni_to_minion || return 1
fi fi

View File

@@ -87,12 +87,6 @@ check_err() {
113) 113)
echo 'No route to host' echo 'No route to host'
;; ;;
160)
echo 'Incompatible Elasticsearch upgrade'
;;
161)
echo 'Required intermediate Elasticsearch upgrade not complete'
;;
*) *)
echo 'Unhandled error' echo 'Unhandled error'
echo "$err_msg" echo "$err_msg"
@@ -165,7 +159,7 @@ EOF
} }
airgap_update_dockers() { airgap_update_dockers() {
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
# Let's copy the tarball # Let's copy the tarball
if [[ ! -f $AGDOCKER/registry.tar ]]; then if [[ ! -f $AGDOCKER/registry.tar ]]; then
echo "Unable to locate registry. Exiting" echo "Unable to locate registry. Exiting"
@@ -200,24 +194,13 @@ update_registry() {
check_airgap() { check_airgap() {
# See if this is an airgap install # See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]') AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
if [[ ! -z "$ISOLOC" ]]; then
# flag to use the ISO for non-airgap installs; it is not honored everywhere is_airgap -eq 0 is checked. Used to speed up network soups by using local storage for large files.
nonairgap_useiso=0
else
nonairgap_useiso=1
fi
if [[ "$AIRGAP" == "true" ]]; then if [[ "$AIRGAP" == "true" ]]; then
is_airgap=0 is_airgap=0
else
is_airgap=1
fi
# use ISO if its airgap install OR ISOLOC was set with -f <path>
if [[ "$AIRGAP" == "true" ]] || [[ $nonairgap_useiso -eq 0 ]]; then
UPDATE_DIR=/tmp/soagupdate/SecurityOnion UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/minimal/Packages AGREPO=/tmp/soagupdate/minimal/Packages
else
is_airgap=1
fi fi
} }
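
Note the shell convention in play here: is_airgap uses 0 for true and 1 for false, matching exit-status semantics rather than boolean literals. A distilled sketch of the pattern:

    AIRGAP="true"
    is_airgap=1                                # default: not airgap
    [[ "$AIRGAP" == "true" ]] && is_airgap=0   # 0 means true, as with exit codes
    if [[ $is_airgap -eq 0 ]]; then
      echo "airgap mode: using local ISO sources"
    fi
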
@@ -336,19 +319,6 @@ clone_to_tmp() {
fi fi
} }
# there is a function like this in so-minion, but we cannot source it since so-minion requires arguments
create_ca_pillar() {
local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
local ca_pillar_file="${ca_pillar_dir}/init.sls"
echo "Updating CA pillar configuration"
mkdir -p "$ca_pillar_dir"
echo "ca: {}" > "$ca_pillar_file"
so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
chown -R socore:socore "$ca_pillar_dir"
}
disable_logstash_heavynodes() { disable_logstash_heavynodes() {
c=0 c=0
printf "\nChecking for heavynodes and disabling Logstash if they exist\n" printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
@@ -364,22 +334,6 @@ disable_logstash_heavynodes() {
done done
} }
disable_redis_heavynodes() {
local c=0
printf "\nChecking for heavynodes and disabling Redis if they exist\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
c=1
echo "Disabling Redis for: $file"
so-yaml.py replace "$file" redis.enabled False
fi
done
if [[ "$c" != 0 ]]; then
FINAL_MESSAGE_QUEUE+=("Redis has been disabled on all heavynodes.")
fi
}
enable_highstate() { enable_highstate() {
echo "Enabling highstate." echo "Enabling highstate."
salt-call state.enable highstate -l info --local salt-call state.enable highstate -l info --local
@@ -408,6 +362,7 @@ masterlock() {
echo "base:" > $TOPFILE echo "base:" > $TOPFILE
echo " $MINIONID:" >> $TOPFILE echo " $MINIONID:" >> $TOPFILE
echo " - ca" >> $TOPFILE echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE echo " - elasticsearch" >> $TOPFILE
} }
@@ -473,7 +428,6 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201 [[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
[[ "$INSTALLEDVERSION" == 2.4.201 ]] && up_to_2.4.210
true true
} }
@@ -507,7 +461,6 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201 [[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
[[ "$POSTVERSION" == 2.4.201 ]] && post_to_2.4.210
true true
} }
@@ -664,6 +617,9 @@ post_to_2.4.180() {
} }
post_to_2.4.190() { post_to_2.4.190() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Only need to update import / eval nodes # Only need to update import / eval nodes
if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
update_import_fleet_output update_import_fleet_output
@@ -696,21 +652,6 @@ post_to_2.4.201() {
POSTVERSION=2.4.201 POSTVERSION=2.4.201
} }
post_to_2.4.210() {
echo "Rolling over Kratos index to apply new index template"
rollover_index "logs-kratos-so"
disable_redis_heavynodes
initialize_elasticsearch_indices "so-case so-casehistory so-assistant-session so-assistant-chat"
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.210
}
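
rollover_index presumably wraps the Elasticsearch rollover API for the named write alias or data stream. A hand-rolled equivalent, assuming direct cluster access and that the URL and credential variables are illustrative:

    # Forces the data stream/alias to start a new backing index
    curl -sk -u "$ES_USER:$ES_PASS" -XPOST "https://localhost:9200/logs-kratos-so/_rollover"
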
repo_sync() { repo_sync() {
echo "Sync the local repo." echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -972,7 +913,9 @@ up_to_2.4.180() {
} }
up_to_2.4.190() { up_to_2.4.190() {
echo "Nothing to do for 2.4.190" # Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.190 INSTALLEDVERSION=2.4.190
} }
@@ -985,20 +928,6 @@ up_to_2.4.200() {
INSTALLEDVERSION=2.4.200 INSTALLEDVERSION=2.4.200
} }
up_to_2.4.201() {
echo "Nothing to do for 2.4.201"
INSTALLEDVERSION=2.4.201
}
up_to_2.4.210() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
create_ca_pillar
INSTALLEDVERSION=2.4.210
}
add_hydra_pillars() { add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1395,8 +1324,14 @@ so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids
} }
up_to_2.4.201() {
echo "Nothing to do for 2.4.201"
INSTALLEDVERSION=2.4.201
}
determine_elastic_agent_upgrade() { determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap update_elastic_agent_airgap
else else
set +e set +e
@@ -1690,252 +1625,6 @@ verify_latest_update_script() {
fi fi
} }
verify_es_version_compatibility() {
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.6"]="8.18.8 9.0.8"
["8.18.8"]="9.0.8"
)
# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024"
)
# Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
exit 160
fi
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
# so-yaml.py failed to get the ES version from the upgrade version's elasticsearch/defaults.yaml file. They are likely upgrading to an SO version older than 2.4.110, prior to the ES version pinning, and should be OKAY to continue with the upgrade.
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
fi
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
if [[ -f "$es_required_version_statefile" ]]; then
# statefile exists, so the verification script should have already been created
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script $es_verification_script
fi
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!"
# create script using version in statefile
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
required_es_upgrade_version="${compatible_versions##* }"
fi
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
# We expect to upgrade to the latest compatible minor version of ES
create_intermediate_upgrade_verification_script $es_verification_script
if [[ $is_airgap -eq 0 ]]; then
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
echo -e "\n##############################################################################################################################\n"
exit 160
else
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_version="$BRANCH"
else
local originally_requested_so_version="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
echo -e \"\n##############################################################################################################################\n\" && \
echo -e \"Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
&& timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e \"\n##############################################################################################################################\n\" \
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
fi
fi
}
create_intermediate_upgrade_verification_script() {
# After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step
local verification_script="$1"
cat << 'EOF' > "$verification_script"
#!/bin/bash
SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
EXPECTED_ES_VERSION="$1"
if [[ -z "$EXPECTED_ES_VERSION" ]]; then
echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
exit 1
fi
if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
fi
check_heavynodes_es_version() {
# Check if heavynodes are in this grid
if ! salt-key -l accepted | grep -q 'heavynode$'; then
# No heavynodes, skip version check
echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
return 0
fi
echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
local exit_status=$?
# Check that all heavynodes returned good data
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$heavynode_versions" | jq -s 'add'
echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
return 1
}
check_searchnodes_es_version() {
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
local exit_status=$?
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
return 1
}
# Check heavynodes as well and ensure each heavynode's own "cluster" is upgraded before moving on to the final upgrade.
check_searchnodes_es_version || exit 1
check_heavynodes_es_version || exit 1
# Remove required version state file after successful verification
rm -f "$2"
exit 0
EOF
}
# Keeping this block in case we need to do a hotfix that requires salt update # Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() { apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -1962,7 +1651,7 @@ apply_hotfix() {
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion" systemctl_func "start" "salt-minion"
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
fi fi
else else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)" echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
@@ -2014,10 +1703,15 @@ main() {
MINION_ROLE=$(lookup_role) MINION_ROLE=$(lookup_role)
echo "Found that Security Onion $INSTALLEDVERSION is currently installed." echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo "" echo ""
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
# Let's mount the ISO since this is airgap or non-airgap with -f used # Let's mount the ISO since this is airgap
airgap_mounted airgap_mounted
else else
# if not airgap but -f was used
if [[ ! -z "$ISOLOC" ]]; then
airgap_mounted
AGDOCKER=/tmp/soagupdate/docker
fi
echo "Cloning Security Onion github repo into $UPDATE_DIR." echo "Cloning Security Onion github repo into $UPDATE_DIR."
echo "Removing previous upgrade sources." echo "Removing previous upgrade sources."
rm -rf $UPDATE_DIR rm -rf $UPDATE_DIR
@@ -2027,8 +1721,6 @@ main() {
echo "Verifying we have the latest soup script." echo "Verifying we have the latest soup script."
verify_latest_update_script verify_latest_update_script
verify_es_version_compatibility
echo "Let's see if we need to update Security Onion." echo "Let's see if we need to update Security Onion."
upgrade_check upgrade_check
upgrade_space upgrade_space
@@ -2037,8 +1729,7 @@ main() {
upgrade_check_salt upgrade_check_salt
set -e set -e
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
# non-airgap with -f used can do an initial ISO repo update and so-repo-sync cron job will sync any diff later via network
update_airgap_repo update_airgap_repo
dnf clean all dnf clean all
check_os_updates check_os_updates
@@ -2157,7 +1848,7 @@ main() {
echo "" echo ""
echo "Running a highstate. This could take several minutes." echo "Running a highstate. This could take several minutes."
set +e set +e
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate highstate
set -e set -e
@@ -2170,7 +1861,7 @@ main() {
check_saltmaster_status check_saltmaster_status
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
# Stop long-running scripts to allow potentially updated scripts to load on the next execution. # Stop long-running scripts to allow potentially updated scripts to load on the next execution.
killall salt-relay.sh killall salt-relay.sh

View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Drop the correct nginx config based on role # Drop the correct nginx config based on role
nginxconfdir: nginxconfdir:
file.directory: file.directory:

View File

@@ -8,14 +8,81 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'nginx/map.jinja' import NGINXMERGED %} {% from 'nginx/map.jinja' import NGINXMERGED %}
{% set ca_server = GLOBALS.minion_id %}
include: include:
- nginx.ssl
- nginx.config - nginx.config
- nginx.sostatus - nginx.sostatus
{% if GLOBALS.role != 'so-fleet' %}
{% set container_config = 'so-nginx' %} {% if grains.role not in ['so-fleet'] %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
make-rule-dir-nginx:
file.directory:
- name: /nsm/rules
@@ -26,9 +93,13 @@ make-rule-dir-nginx:
- group
- show_changes: False
{% else %}
{% endif %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% if GLOBALS.role == 'so-fleet' %}
{% set container_config = 'so-nginx-fleet-node' %}
{% else %}
{% set container_config = 'so-nginx' %}
{% endif %}
so-nginx:
@@ -83,15 +154,6 @@ so-nginx:
- watch:
- file: nginxconf
- file: nginxconfdir
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
- file: managerssl_key
- file: managerssl_crt
{% else %}
- x509: managerssl_key
- x509: managerssl_crt
{% endif%}
{% endif %}
- require:
- file: nginxconf
{% if GLOBALS.is_manager %}
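
One subtlety in the inlined block above: days_remaining moves from 7 (in the deleted nginx ssl state shown below) to 0, which in Salt's x509.certificate_managed disables time-based reissue. A minimal Python sketch of that renewal test, assuming the cryptography package is available; this approximates Salt's check rather than copying it:

from datetime import datetime, timedelta
from cryptography import x509

def needs_renewal(cert_path: str, days_remaining: int) -> bool:
    # Reissue when the cert expires within days_remaining days;
    # with days_remaining=0 the time-based check never fires.
    if days_remaining <= 0:
        return False
    with open(cert_path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())
    return cert.not_valid_after - datetime.utcnow() <= timedelta(days=days_remaining)

# e.g. needs_renewal("/etc/pki/managerssl.crt", 7)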

View File

@@ -1,87 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.role != 'so-fleet' %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}
managerssl_key:
file.managed:
- name: /etc/pki/managerssl.key
- source: salt://nginx/ssl/ssl.key
- mode: 640
- group: 939
- watch_in:
- docker_container: so-nginx
managerssl_crt:
file.managed:
- name: /etc/pki/managerssl.crt
- source: salt://nginx/ssl/ssl.crt
- mode: 644
- watch_in:
- docker_container: so-nginx
{% else %}
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
# Create a cert for the reverse proxy
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ CA.server }}
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
- watch_in:
- docker_container: so-nginx
{% endif %}
msslkeyperms:
file.managed:
- replace: False
- name: /etc/pki/managerssl.key
- mode: 640
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,22 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states%}
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
- makedirs: True
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -57,6 +57,12 @@ stenoconf:
PCAPMERGED: {{ PCAPMERGED }}
STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"
stenoca:
file.directory:
- name: /opt/so/conf/steno/certs
- user: 941
- group: 939
pcaptmpdir:
file.directory:
- name: /nsm/pcaptmp
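
The stenoca state added above replaces the deleted pcap.ca file shown later in this diff and boils down to a directory-with-ownership guarantee. A minimal Python equivalent, with the UID/GID values taken from the state (must run as root):

import os

def ensure_dir(path: str, uid: int, gid: int) -> None:
    # Create the directory tree if missing and pin its owner/group,
    # roughly what the stenoca file.directory state enforces.
    os.makedirs(path, exist_ok=True)
    os.chown(path, uid, gid)

ensure_dir("/opt/so/conf/steno/certs", uid=941, gid=939)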

View File

@@ -10,7 +10,6 @@
include:
- pcap.ca
- pcap.config
- pcap.sostatus

View File

@@ -7,6 +7,9 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'redis/map.jinja' import REDISMERGED %}
include:
- ssl
# Redis Setup
redisconfdir:
file.directory:

View File

@@ -9,8 +9,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- ca
- redis.ssl
- redis.config
- redis.sostatus
@@ -33,7 +31,11 @@ so-redis:
- /nsm/redis/data:/data:rw
- /etc/pki/redis.crt:/certs/redis.crt:ro
- /etc/pki/redis.key:/certs/redis.key:ro
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/certs/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro
{% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }}
@@ -53,14 +55,16 @@ so-redis:
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: trusttheca
- x509: redis_crt
- x509: redis_key
- file: /opt/so/conf/redis/etc
- require:
- file: trusttheca
- file: redisconf
- x509: redis_crt
- x509: redis_key
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
delete_so-redis_so-status.disabled:
file.uncomment:
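
The role test introduced above decides which CA certificate lands in the container at /certs/ca.crt: manager-class nodes bind-mount the root CA they hold locally, while every other role mounts the intermediate it downloaded. A compact Python sketch of that selection, with the role strings copied from the state:

MANAGER_ROLES = {"so-manager", "so-managersearch", "so-standalone", "so-import"}

def redis_ca_mount(role: str) -> str:
    # Mirror the grains['role'] test above.
    if role in MANAGER_ROLES:
        return "/etc/pki/ca.crt:/certs/ca.crt:ro"
    return "/etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro"

assert redis_ca_mount("so-manager").startswith("/etc/pki/ca.crt")
assert redis_ca_mount("so-sensor").startswith("/etc/pki/tls")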

View File

@@ -1,54 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
redis_key:
x509.private_key_managed:
- name: /etc/pki/redis.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/redis.key') -%}
- prereq:
- x509: /etc/pki/redis.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
redis_crt:
x509.certificate_managed:
- name: /etc/pki/redis.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: registry
- private_key: /etc/pki/redis.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
rediskeyperms:
file.managed:
- replace: False
- name: /etc/pki/redis.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
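
For reference, the SAN line in the state just removed ("subjectAltName: DNS:..., IP:...") maps onto the cryptography package as below; the hostname and IP are placeholders, and this is only a sketch of the extension the state would have requested:

import ipaddress
from cryptography.x509 import DNSName, IPAddress, SubjectAlternativeName

def build_san(hostname: str, node_ip: str) -> SubjectAlternativeName:
    # Equivalent of 'subjectAltName: DNS:<hostname>, IP:<node_ip>'.
    return SubjectAlternativeName([
        DNSName(hostname),
        IPAddress(ipaddress.ip_address(node_ip)),
    ])

san = build_san("sensor1", "10.0.0.5")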

View File

@@ -6,6 +6,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- ssl
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:

View File

@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- registry.ssl
- registry.config
- registry.sostatus
@@ -54,9 +53,6 @@ so-dockerregistry:
- retry:
attempts: 5
interval: 30
- watch:
- x509: registry_crt
- x509: registry_key
- require:
- file: dockerregistryconf
- x509: registry_crt

View File

@@ -1,77 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
include:
- ca
# Delete directory if it exists at the key path
registry_key_cleanup:
file.absent:
- name: /etc/pki/registry.key
- onlyif:
- test -d /etc/pki/registry.key
registry_key:
x509.private_key_managed:
- name: /etc/pki/registry.key
- keysize: 4096
- backup: True
- new: True
- require:
- file: registry_key_cleanup
{% if salt['file.file_exists']('/etc/pki/registry.key') -%}
- prereq:
- x509: /etc/pki/registry.crt
{%- endif %}
- retry:
attempts: 15
interval: 10
# Delete directory if it exists at the crt path
registry_crt_cleanup:
file.absent:
- name: /etc/pki/registry.crt
- onlyif:
- test -d /etc/pki/registry.crt
# Create a cert for the docker registry
registry_crt:
x509.certificate_managed:
- name: /etc/pki/registry.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.manager }}, IP:{{ GLOBALS.manager_ip }}
- signing_policy: registry
- private_key: /etc/pki/registry.key
- CN: {{ GLOBALS.manager }}
- days_remaining: 7
- days_valid: 820
- backup: True
- require:
- file: registry_crt_cleanup
- timeout: 30
- retry:
attempts: 15
interval: 10
regkeyperms:
file.managed:
- replace: False
- name: /etc/pki/registry.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
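
The two cleanup states in this deleted file guarded against a directory squatting at the PEM paths, which would make x509.private_key_managed and x509.certificate_managed fail to write the files. A minimal Python sketch of the same guard:

import os
import shutil

def clear_dir_at(path: str) -> None:
    # If a directory exists where the PEM file belongs, remove it so the
    # key/cert can be written as a regular file (mirrors the 'onlyif: test -d' guard).
    if os.path.isdir(path):
        shutil.rmtree(path)

clear_dir_at("/etc/pki/registry.key")
clear_dir_at("/etc/pki/registry.crt")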

View File

@@ -46,6 +46,33 @@ def start(interval=60):
mine_update(minion)
continue
# if a manager, check that the ca is in the mine and that it is correct
if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
    x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
    try:
        ca_crt = x509[minion]['/etc/pki/ca.crt']
        log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
        # since the cert is defined, make sure it is valid
        import salt.modules.x509_v2 as x509_v2
        if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
            log.error("checkmine engine: found minion %s doesn't have a valid ca_crt in the mine" % (minion))
            log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
            mine_delete(minion, 'x509.get_pem_entries')
            mine_update(minion)
            continue
        else:
            log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
    except KeyError:
        # a missing minion entry and a missing cert path both raise KeyError,
        # so distinguish the two cases and only clear what is stale
        if minion in x509:
            log.error("checkmine engine: found minion %s doesn't have a ca_crt in the mine" % (minion))
            mine_delete(minion, 'x509.get_pem_entries')
        else:
            log.error('checkmine engine: found minion %s is not in the mine' % (minion))
            mine_flush(minion)
        mine_update(minion)
        continue
# Update the mine if the ip in the mine doesn't match what manage.alived returns
network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
try:
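
To reproduce by hand what the engine block above automates, the same two checks can be run from Python on the manager. The minion id below is a placeholder, the master config path is the standard default, and calling the x509_v2 module outside the Salt loader is assumed to work because verify_private_key only needs the cryptography library:

import salt.config
import salt.runner
import salt.modules.x509_v2 as x509_v2

opts = salt.config.master_config("/etc/salt/master")
runner = salt.runner.RunnerClient(opts)

# Same mine lookup the engine performs for a manager-class minion
pems = runner.cmd("mine.get", kwarg={"tgt": "manager_manager", "fun": "x509.get_pem_entries"})
print(pems.get("manager_manager", {}).get("/etc/pki/ca.crt"))

# Same key/cert consistency test
print(x509_v2.verify_private_key("/etc/pki/ca.key", "/etc/pki/ca.crt"))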

Some files were not shown because too many files have changed in this diff.