Merge branch '2.4/main' of github.com:Security-Onion-Solutions/securityonion into 2.4/main

Commit by Mike Reeves on 2024-11-16 20:41:20 -05:00
620 changed files with 16431 additions and 1679 deletions

View File

@@ -536,7 +536,7 @@ secretGroup = 4
[allowlist]
description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''']
paths = [
'''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
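The new `ssl_.*password` entry extends the allowlist so TLS key/keystore password settings are not flagged as leaked secrets. A quick sanity check of what the pattern matches (the sample config line is hypothetical):
```
echo 'ssl_keystore_password: changeme' | grep -E 'ssl_.*password'
# prints the line, so gitleaks would allowlist a match like this
```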

View File

@@ -11,7 +11,6 @@ body:
description: Which version of Security Onion 2.4.x are you asking about?
options:
-
- 2.4 Pre-release (Beta, Release Candidate)
- 2.4.10
- 2.4.20
- 2.4.30
@@ -22,6 +21,7 @@ body:
- 2.4.80
- 2.4.90
- 2.4.100
- 2.4.110
- Other (please provide detail below)
validations:
required: true
@@ -32,9 +32,10 @@ body:
options:
-
- Security Onion ISO image
- Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc.
- Network installation on Ubuntu
- Network installation on Debian
- Cloud image (Amazon, Azure, Google)
- Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc. (unsupported)
- Network installation on Ubuntu (unsupported)
- Network installation on Debian (unsupported)
- Other (please provide detail below)
validations:
required: true

View File

@@ -1,17 +1,17 @@
### 2.4.70-20240529 ISO image released on 2024/05/29
### 2.4.110-20241010 ISO image released on 2024/10/10
### Download and Verify
2.4.70-20240529 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
2.4.110-20241010 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.110-20241010.iso
MD5: 8FCCF31C2470D1ABA380AF196B611DEC
SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B
SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
MD5: A8003DEBC4510D538F06238D9DBB86C0
SHA1: 441DE90A192C8FE8BEBAB9ACE1A3CC18F71A2B1F
SHA256: B087A0D12FC2CA3CCD02BD52E52421F4F60DC09BF826337A057E05A04D114CCE
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.110-20241010.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,27 +25,29 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.110-20241010.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.110-20241010.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso
gpg --verify securityonion-2.4.110-20241010.iso.sig securityonion-2.4.110-20241010.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013
gpg: Signature made Thu 10 Oct 2024 07:05:30 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```
If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.4/installation.html
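Beyond the GPG signature check, the published hashes above can be compared directly against the download; for example, with coreutils:
```
sha256sum securityonion-2.4.110-20241010.iso
# expect: b087a0d12fc2ca3ccd02bd52e52421f4f60dc09bf826337a057e05a04d114cce
```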

HOTFIX
View File

@@ -0,0 +1 @@
20241010

View File

@@ -5,9 +5,11 @@
| Version | Supported |
| ------- | ------------------ |
| 2.4.x | :white_check_mark: |
| 2.3.x | :white_check_mark: |
| 2.3.x | :x: |
| 16.04.x | :x: |
Security Onion 2.3 has reached End Of Life and is no longer supported.
Security Onion 16.04 has reached End Of Life and is no longer supported.
## Reporting a Vulnerability

View File

@@ -1 +1 @@
2.4.70
2.4.110

View File

@@ -19,4 +19,4 @@ role:
receiver:
standalone:
searchnode:
sensor:
sensor:

View File

@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='elasticsearch:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
elasticsearch:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}
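Given minions named like `node1_managersearch` and `node2_searchnode` that returned addresses to the mine, this template would render a pillar roughly as follows (hostnames and IPs hypothetical); the new `redis/nodes.sls` below and the reworked `logstash/nodes.sls` follow the same pattern:
```
elasticsearch:
  nodes:
    managersearch:
      node1:
        ip: 10.0.0.10
    searchnode:
      node2:
        ip: 10.0.0.20
```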

pillar/kafka/nodes.sls
View File

@@ -0,0 +1,2 @@
kafka:
nodes:

View File

@@ -1,16 +1,15 @@
{% set node_types = {} %}
{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
tgt='logstash:enabled:true',
fun='network.ip_addrs',
tgt_type='compound') | dictsort()
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = cached_grains[minionid]['host'] %}
{% set node_type = minionid.split('_')[1] %}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}

pillar/redis/nodes.sls
View File

@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='redis:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
redis:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}

View File

@@ -47,10 +47,12 @@ base:
- kibana.adv_kibana
- kratos.soc_kratos
- kratos.adv_kratos
- redis.nodes
- redis.soc_redis
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet
@@ -61,6 +63,9 @@ base:
- backup.adv_backup
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- stig.soc_stig
'*_sensor':
@@ -144,10 +149,12 @@ base:
- idstools.adv_idstools
- kratos.soc_kratos
- kratos.adv_kratos
- redis.nodes
- redis.soc_redis
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet
@@ -176,6 +183,9 @@ base:
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_heavynode':
- elasticsearch.auth
@@ -209,17 +219,22 @@ base:
- logstash.nodes
- logstash.soc_logstash
- logstash.adv_logstash
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
- redis.nodes
- redis.soc_redis
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_receiver':
- logstash.nodes
@@ -232,6 +247,10 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- soc.license
'*_import':
- secrets
@@ -291,3 +310,5 @@ base:
'*_desktop':
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license

pyci.sh
View File

@@ -15,12 +15,16 @@ TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
if [ ! -d .venv ]; then
python -m venv .venv
fi
source .venv/bin/activate
if ! pip install flake8 pytest pytest-cov pyyaml; then
echo "Unable to install dependencies."
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
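With this change pyci.sh bootstraps its own virtualenv and installs flake8/pytest itself rather than asking the user to install them globally. Typical invocation (target directory hypothetical):
```
./pyci.sh salt/elasticfleet/files/soc
```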

View File

@@ -103,7 +103,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-managersearch': [
'salt.master',
@@ -125,7 +126,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-searchnode': [
'ssl',
@@ -134,7 +136,9 @@
'firewall',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka.ca',
'kafka.ssl'
],
'so-standalone': [
'salt.master',
@@ -159,7 +163,8 @@
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-sensor': [
'ssl',
@@ -190,12 +195,15 @@
'telegraf',
'firewall',
'schedule',
'docker_clean'
'docker_clean',
'kafka',
'stig'
],
'so-desktop': [
'ssl',
'docker_clean',
'telegraf'
'telegraf',
'stig'
],
}, grain='role') %}

View File

@@ -1,6 +1,3 @@
mine_functions:
x509.get_pem_entries: [/etc/pki/ca.crt]
x509_signing_policies:
filebeat:
- minions: '*'
@@ -70,3 +67,17 @@ x509_signing_policies:
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 820
- copypath: /etc/pki/issued_certs/
kafka:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "digitalSignature, keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: "serverAuth, clientAuth"
- days_valid: 820
- copypath: /etc/pki/issued_certs/
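A minion could request a certificate under the new `kafka` signing policy with the standard Salt x509 state; a minimal sketch, assuming the manager is reachable as `ca_server`, with hypothetical paths and CN:
```
/etc/pki/kafka.crt:
  x509.certificate_managed:
    - ca_server: manager
    - signing_policy: kafka
    - private_key: /etc/pki/kafka.key
    - CN: node1.example.com
    - days_remaining: 30
```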

View File

@@ -14,6 +14,11 @@ net.core.wmem_default:
sysctl.present:
- value: 26214400
# Users are not a fan of console messages
kernel.printk:
sysctl.present:
- value: "3 4 1 3"
# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
file.absent:
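The four fields of `kernel.printk` are console_loglevel, default_message_loglevel, minimum_console_loglevel, and default_console_loglevel; `3 4 1 3` means only messages more urgent than KERN_ERR (emerg/alert/crit) reach the console. The same setting can be tested at runtime with:
```
sysctl -w kernel.printk="3 4 1 3"
cat /proc/sys/kernel/printk
```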

View File

@@ -57,6 +57,12 @@ copy_so-yaml_manager_tools_sbin:
- force: True
- preserve: True
copy_so-repo-sync_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- preserve: True
# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin:
@@ -94,6 +100,13 @@ copy_so-yaml_sbin:
- force: True
- preserve: True
copy_so-repo-sync_sbin:
file.copy:
- name: /usr/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:

View File

@@ -8,12 +8,6 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
DEFAULT_SALT_DIR=/opt/so/saltstack/default
DOC_BASE_URL="https://docs.securityonion.net/en/2.4"
@@ -31,6 +25,11 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
export PATH="$PATH:/usr/sbin"
fi
# See if a proxy is set. If so use it.
if [ -f /etc/profile.d/so-proxy.sh ]; then
. /etc/profile.d/so-proxy.sh
fi
# Define a banner to separate sections
banner="========================================================================="
@@ -169,6 +168,46 @@ check_salt_minion_status() {
return $status
}
# Compare es versions and return the highest version
compare_es_versions() {
# Save the original IFS
local OLD_IFS="$IFS"
IFS=.
local i ver1=($1) ver2=($2)
# Restore the original IFS
IFS="$OLD_IFS"
# Determine the maximum length between the two version arrays
local max_len=${#ver1[@]}
if [[ ${#ver2[@]} -gt $max_len ]]; then
max_len=${#ver2[@]}
fi
# Compare each segment of the versions
for ((i=0; i<max_len; i++)); do
# If a segment in ver1 or ver2 is missing, set it to 0
if [[ -z ${ver1[i]} ]]; then
ver1[i]=0
fi
if [[ -z ${ver2[i]} ]]; then
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo "$1"
return 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo "$2"
return 0
fi
done
echo "$1" # If versions are equal, return either
return 0
}
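The helper compares dot-separated versions segment by segment, forcing base-10 arithmetic so segments with leading zeros are not parsed as octal. A quick check of the expected behavior:
```
compare_es_versions 8.14.3 8.9.0     # prints 8.14.3
compare_es_versions 8.10 8.10.0.1    # prints 8.10.0.1 (missing segments default to 0)
```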
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
@@ -258,11 +297,6 @@ fail() {
exit 1
}
get_random_value() {
length=${1:-20}
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
@@ -271,6 +305,27 @@ get_agent_count() {
fi
}
get_elastic_agent_vars() {
local path="${1:-/opt/so/saltstack/default}"
local defaultsfile="${path}/salt/elasticsearch/defaults.yaml"
if [ -f "$defaultsfile" ]; then
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
else
fail "Could not find salt/elasticsearch/defaults.yaml"
fi
}
get_random_value() {
length=${1:-20}
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -622,6 +677,8 @@ has_uppercase() {
}
update_elastic_agent() {
local path="${1:-/opt/so/saltstack/default}"
get_elastic_agent_vars "$path"
echo "Checking if Elastic Agent update is necessary..."
download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR"
}
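The hard-coded agent tarball variables removed above are now derived from salt/elasticsearch/defaults.yaml, so the agent version tracks the packaged Elastic version. A sketch of the resulting flow (the echoed values depend on defaults.yaml):
```
. /usr/sbin/so-common
get_elastic_agent_vars /opt/so/saltstack/default
echo "$ELASTIC_AGENT_TARBALL_VERSION"   # e.g. 8.14.3
echo "$ELASTIC_AGENT_URL"
```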

View File

@@ -50,6 +50,7 @@ container_list() {
"so-idh"
"so-idstools"
"so-influxdb"
"so-kafka"
"so-kibana"
"so-kratos"
"so-logstash"
@@ -64,7 +65,7 @@ container_list() {
"so-strelka-manager"
"so-suricata"
"so-telegraf"
"so-zeek"
"so-zeek"
)
else
TRUSTED_CONTAINERS=(
@@ -111,6 +112,10 @@ update_docker_containers() {
container_list
fi
# all the images using ELASTICSEARCHDEFAULTS.elasticsearch.version
# does not include so-elastic-fleet since that container uses so-elastic-agent image
local IMAGES_USING_ES_VERSION=("so-elasticsearch")
rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
@@ -138,15 +143,36 @@ update_docker_containers() {
$PROGRESS_CALLBACK $i
fi
if [[ " ${IMAGES_USING_ES_VERSION[*]} " =~ [[:space:]]${i}[[:space:]] ]]; then
# this is an es container so use version defined in elasticsearch defaults.yaml
local UPDATE_DIR='/tmp/sogh/securityonion'
if [ ! -d "$UPDATE_DIR" ]; then
UPDATE_DIR=/securityonion
fi
local v1=0
local v2=0
if [[ -f "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v1=$(egrep " +version: " "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
if [[ -f "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v2=$(egrep " +version: " "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
local highest_es_version=$(compare_es_versions "$v1" "$v2")
local image=$i:$highest_es_version$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/es-$highest_es_version/$image.sig
else
# this is not an es container so use the so version for the version
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/$VERSION/$image.sig
fi
# Pull down the trusted docker image
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
run_check_net_err \
"docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
# Get signature
run_check_net_err \
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" \
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' $sig_url --output $SIGNPATH/$image.sig" \
"Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \
noretry >> "$LOG_FILE" 2>&1
# Dump our hash values
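An Elasticsearch-family image is now tagged with the highest version found in either copy of defaults.yaml, and its signature comes from a version-specific path. For a hypothetical 8.14.3 release with an empty $IMAGE_TAG_SUFFIX, the computed values would be:
```
image=so-elasticsearch:8.14.3
sig_url=https://sigs.securityonion.net/es-8.14.3/so-elasticsearch:8.14.3.sig
```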

View File

@@ -95,6 +95,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -147,6 +149,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -170,6 +173,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -205,6 +209,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
fi
RESULT=0
@@ -241,6 +246,7 @@ exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check.
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"

View File

@@ -9,6 +9,9 @@
. /usr/sbin/so-common
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
{%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %}
model={{ model }}
@@ -16,33 +19,42 @@ model={{ model }}
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
exit 0
fi
for i in "${software_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=true
is_hwraid=false
break
fi
done
for i in "${hardware_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=false
is_hwraid=true
break
fi
done
{%- else %}
echo "This is not an appliance"
exit 0
{%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
is_hwraid=true
fi
check_nsm_raid() {
PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
if [[ $APPLIANCE == '1' ]]; then
if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
#This doesn't have raid
HWRAID=0
else
if [[ -n $PERCCLI ]]; then
HWRAID=0
elif [[ -n $MEGACTL ]]; then
HWRAID=0
else
HWRAID=1
fi
fi
fi
}
@@ -50,17 +62,27 @@ check_nsm_raid() {
check_boss_raid() {
MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)
# Check to see if this is a SM based system
if [[ -z $MVTEST ]]; then
if [[ -n $MVCLI ]]; then
# Is this NVMe Boss Raid?
if [[ "$model" =~ "-DE02" ]]; then
if [[ -n $BOSSNVMECLI ]]; then
BOSSRAID=0
else
BOSSRAID=1
fi
else
# This doesn't have boss raid so lets make it 0
BOSSRAID=0
# Check to see if this is a SM based system
if [[ -z $MVTEST ]]; then
if [[ -n $MVCLI ]]; then
BOSSRAID=0
else
BOSSRAID=1
fi
else
# This doesn't have boss raid so lets make it 0
BOSSRAID=0
fi
fi
}
@@ -79,14 +101,13 @@ SWRAID=0
BOSSRAID=0
HWRAID=0
if [[ $is_hwraid ]]; then
if [[ "$is_hwraid" == "true" ]]; then
check_nsm_raid
check_boss_raid
fi
if [[ $is_bossraid ]]; then
check_boss_raid
fi
if [[ $is_swraid ]]; then
if [[ "$is_softwareraid" == "true" ]]; then
check_software_raid
check_boss_raid
fi
sum=$(($SWRAID + $BOSSRAID + $HWRAID))

View File

@@ -10,7 +10,7 @@
. /usr/sbin/so-common
. /usr/sbin/so-image-common
REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"}
REPLAYSPEED=${REPLAYSPEED:-10}
mkdir -p /opt/so/samples

View File

@@ -187,3 +187,12 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kafka':
final_octet: 88
port_bindings:
- 0.0.0.0:9092:9092
- 0.0.0.0:9093:9093
- 0.0.0.0:8778:8778
custom_bind_mounts: []
extra_hosts: []
extra_env: []
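The new so-kafka container publishes 9092, 9093, and 8778; by Kafka and Jolokia convention these would be the broker listener, controller listener, and metrics endpoint respectively (an assumption, the roles are not stated here). Once the container is running, the bindings can be confirmed with:
```
ss -lntp | grep -E ':(9092|9093|8778)'
```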

View File

@@ -20,41 +20,41 @@ dockergroup:
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-1
- docker-ce: 5:24.0.3-1~debian.12~bookworm
- docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~debian.12~bookworm
- docker-ce-cli: 5:27.2.0-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:27.2.0-1~debian.12~bookworm
- hold: True
- update_holds: True
{% elif grains.oscodename == 'jammy' %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-1
- docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.22.04~jammy
- hold: True
- update_holds: True
{% else %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.4.9-1
- docker-ce: 5:20.10.8~3-0~ubuntu-focal
- docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
- docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~ubuntu.20.04~focal
- docker-ce-cli: 5:27.2.0-1~ubuntu.20.04~focal
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.20.04~focal
- hold: True
- update_holds: True
{% endif %}
{% endif %}
{% else %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-3.1.el9
- docker-ce: 24.0.4-1.el9
- docker-ce-cli: 24.0.4-1.el9
- docker-ce-rootless-extras: 24.0.4-1.el9
- containerd.io: 1.7.21-3.1.el9
- docker-ce: 3:27.2.0-1.el9
- docker-ce-cli: 1:27.2.0-1.el9
- docker-ce-rootless-extras: 27.2.0-1.el9
- hold: True
- update_holds: True
{% endif %}

View File

@@ -101,3 +101,4 @@ docker:
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions
so-kafka: *dockerOptions

View File

@@ -1,10 +1,10 @@
elastalert:
enabled:
description: You can enable or disable Elastalert.
description: Enables or disables the ElastAlert 2 process. This process is critical for ensuring alerts arrive in SOC, and for outbound notification delivery.
helpLink: elastalert.html
alerter_parameters:
title: Alerter Parameters
description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
title: Custom Configuration Parameters
description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml

View File

@@ -1,4 +1,4 @@
elastic_fleet_package_registry:
enabled:
description: You can enable or disable Elastic Fleet Package Registry.
description: Enables or disables the Fleet package registry process. This process must remain enabled to allow Elastic Agent packages to be updated.
advanced: True

View File

@@ -8,7 +8,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- elasticagent.config
- elasticagent.sostatus

View File

@@ -0,0 +1,4 @@
elasticagent:
enabled:
description: Enables or disables the Elastic Agent process. This process must remain enabled to allow collection of node events.
advanced: True

View File

@@ -73,6 +73,63 @@ eapackageupgrade:
- template: jinja
{% if GLOBALS.role != "so-fleet" %}
soresourcesrepoconfig:
git.config_set:
- name: safe.directory
- value: /nsm/securityonion-resources
- global: True
- user: socore
{% if not GLOBALS.airgap %}
soresourcesrepoclone:
git.latest:
- name: https://github.com/Security-Onion-Solutions/securityonion-resources.git
- target: /nsm/securityonion-resources
- rev: 'main'
- depth: 1
- force_reset: True
{% endif %}
elasticdefendconfdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets
- user: 947
- group: 939
- makedirs: True
elasticdefenddisabled:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml
- source: salt://elasticfleet/files/soc/elastic-defend-disabled-filters.yaml
- user: 947
- group: 939
- mode: 600
elasticdefendcustom:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw
- source: salt://elasticfleet/files/soc/elastic-defend-custom-filters.yaml
- user: 947
- group: 939
- mode: 600
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
cron-elastic-defend-filters:
cron.{{ap}}:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- identifier: elastic-defend-filters
- user: root
- minute: '0'
- hour: '3'
- daymonth: '*'
- month: '*'
- dayweek: '*'
eaintegrationsdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations

View File

@@ -8,6 +8,8 @@ elasticfleet:
endpoints_enrollment: ''
es_token: ''
grid_enrollment: ''
defend_filters:
enable_auto_configuration: False
logging:
zeek:
excluded:
@@ -36,6 +38,7 @@ elasticfleet:
- aws
- azure
- barracuda
- barracuda_cloudgen_firewall
- carbonblack_edr
- cef
- checkpoint
@@ -66,6 +69,7 @@ elasticfleet:
- http_endpoint
- httpjson
- iis
- imperva_cloud_waf
- journald
- juniper
- juniper_srx
@@ -97,6 +101,7 @@ elasticfleet:
- symantec_endpoint
- system
- tcp
- tenable_io
- tenable_sc
- ti_abusech
- ti_anomali

View File

@@ -17,17 +17,21 @@ include:
- elasticfleet.sostatus
- ssl
{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
{% endif %}
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
@@ -35,7 +39,9 @@ so-elastic-fleet-auto-configure-logstash-outputs:
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
@@ -43,12 +49,16 @@ so-elastic-fleet-auto-configure-server-urls:
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: True
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
@@ -134,6 +144,19 @@ so-elastic-agent-grid-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry: True
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% endif %}
delete_so-elastic-fleet_so-status.disabled:

View File

@@ -0,0 +1,19 @@
{
"package": {
"name": "fleet_server",
"version": ""
},
"name": "fleet_server-1",
"namespace": "default",
"policy_id": "FleetServer_hostname",
"vars": {},
"inputs": {
"fleet_server-fleet-server": {
"enabled": true,
"vars": {
"custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
},
"streams": {}
}
}
}

View File

@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "8.10.2"
"version": "8.14.0"
},
"enabled": true,
"policy_id": "endpoints-initial",

View File

@@ -11,7 +11,7 @@
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"winlog.winlogs": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",

View File

@@ -20,7 +20,7 @@
],
"data_stream.dataset": "import",
"custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.43.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.43.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.43.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.59.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.45.1\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.59.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.59.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.45.1\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
"import"
]

View File

@@ -0,0 +1,27 @@
title: 'Template 1'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'dns_query'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'QueryName'
Condition: 'end with'
Pattern: '.thawte.com'
---
title: 'Template 2'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'process_creation'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'ParentImage'
Condition: 'is'
Pattern: 'C:\Windows\Microsoft.NET\Framework\v4.0.30319\ngentask.exe'

View File

@@ -0,0 +1,3 @@
'9EDAA51C-BB12-49D9-8748-2B61371F2E7D':
Date: '10/10/2024'
Notes: 'Example Disabled Filter - Leave this entry here, just copy and paste as needed.'

View File

@@ -1,6 +1,6 @@
elasticfleet:
enabled:
description: You can enable or disable Elastic Fleet.
description: Enables or disables the Elastic Fleet process. This process is critical for managing Elastic Agents.
advanced: True
helpLink: elastic-fleet.html
enable_manager_output:
@@ -9,6 +9,24 @@ elasticfleet:
global: True
forcedType: bool
helpLink: elastic-fleet.html
files:
soc:
elastic-defend-disabled-filters__yaml:
title: Disabled Elastic Defend filters
description: Enter the ID of the filter that should be disabled.
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
elastic-defend-custom-filters__yaml:
title: Custom Elastic Defend filters
description: Enter custom filters separated by ---
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
logging:
zeek:
excluded:
@@ -16,6 +34,12 @@ elasticfleet:
forcedType: "[]string"
helpLink: zeek.html
config:
defend_filters:
enable_auto_configuration:
description: Enable auto-configuration and management of the Elastic Defend Exclusion filters.
global: True
helpLink: elastic-fleet.html
advanced: True
server:
custom_fqdn:
description: Custom FQDN for Agents to connect to. One per line.

View File

@@ -0,0 +1,251 @@
from datetime import datetime
import sys
import getopt
from so_elastic_defend_filters_helper import *
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
# Define mappings for Target Field, Event Type, Conditions
TARGET_FIELD_MAPPINGS = {
"Image": "process.executable",
"ParentImage": "process.parent.executable",
"CommandLine": "process.command_line",
"ParentCommandLine": "process.parent.command_line",
"DestinationHostname": "destination.domain",
"QueryName": "dns.question.name",
"DestinationIp": "destination.ip",
"TargetObject": "registry.path",
"TargetFilename": "file.path"
}
DATASET_MAPPINGS = {
"process_create": "endpoint.events.process",
"network_connection": "endpoint.events.network",
"file_create": "endpoint.events.file",
"file_delete": "endpoint.events.file",
"registry_event": "endpoint.events.registry",
"dns_query": "endpoint.events.network"
}
CONDITION_MAPPINGS = {
"is": ("included", "match"),
"end with": ("included", "wildcard"),
"begin with": ("included", "wildcard"),
"contains": ("included", "wildcard")
}
# Extract entries for a rule
def extract_entries(data, event_type):
entries = []
filter_data = data.get('filter', {})
for value in filter_data.values():
target_field = TARGET_FIELD_MAPPINGS.get(value.get('TargetField', ''))
condition = value.get('Condition', '')
pattern = value.get('Pattern', '')
if condition not in CONDITION_MAPPINGS:
logging.error(f"Invalid condition: {condition}")
continue
# Modify the pattern based on the condition
pattern = modify_pattern(condition, pattern)
operator, match_type = CONDITION_MAPPINGS[condition]
entries.append({
"field": target_field,
"operator": operator,
"type": match_type,
"value": pattern
})
# Add the event.dataset entry from DATASET_MAPPINGS
dataset_value = DATASET_MAPPINGS.get(event_type, '')
if dataset_value:
entries.append({
"field": "event.dataset",
"operator": "included",
"type": "match",
"value": dataset_value
})
else:
logging.error(f"No dataset mapping found for event_type: {event_type}")
return entries
# Build the JSON
def build_json_entry(entries, guid, event_type, context):
return {
"comments": [],
"entries": entries,
"item_id": guid,
"name": f"SO - {event_type} - {guid}",
"description": f"{context}\n\n <<- Note: This filter is managed by Security Onion. ->>",
"namespace_type": "agnostic",
"tags": ["policy:all"],
"type": "simple",
"os_types": ["windows"],
"entries": entries
}
# Check to see if the rule is disabled
# If it is, make sure it is not active
def disable_check(guid, disabled_rules, username, password):
if guid in disabled_rules:
logging.info(f"Rule {guid} is in the disabled rules list, confirming that is is actually disabled...")
existing_rule = api_request("GET", guid, username, password)
if existing_rule:
if api_request("DELETE", guid, username, password):
logging.info(f"Successfully deleted rule {guid}")
return True, "deleted"
else:
logging.error(f"Error deleting rule {guid}.")
return True, "Error deleting"
return True, "NOP"
return False, None
def modify_pattern(condition, pattern):
"""
Modify the pattern based on the condition.
- 'end with': Add '*' to the beginning of the pattern.
- 'begin with': Add '*' to the end of the pattern.
- 'contains': Add '*' to both the beginning and end of the pattern.
"""
if isinstance(pattern, list):
# Apply modification to each pattern in the list if it's a list of patterns
return [modify_pattern(condition, p) for p in pattern]
if condition == "end with":
return f"*{pattern}"
elif condition == "begin with":
return f"{pattern}*"
elif condition == "contains":
return f"*{pattern}*"
return pattern
def process_rule_update_or_create(guid, json_entry, username, password):
existing_rule = api_request("GET", guid, username, password)
if existing_rule:
existing_rule_data = extract_relevant_fields(existing_rule)
new_rule_data = extract_relevant_fields(json_entry)
if generate_hash(existing_rule_data) != generate_hash(new_rule_data):
logging.info(f"Updating rule {guid}")
json_entry.pop("list_id", None)
api_request("PUT", guid, username, password, json_data=json_entry)
return "updated"
logging.info(f"Rule {guid} is up to date.")
return "no_change"
else:
logging.info(f"Creating new rule {guid}")
json_entry["list_id"] = "endpoint_event_filters"
api_request("POST", guid, username, password, json_data=json_entry)
return "new"
# Main function for processing rules
def process_rules(yaml_files, disabled_rules, username, password):
stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
for data in yaml_files:
logging.info(f"Processing rule: {data.get('id', '')}")
event_type = data.get('event_type', '')
guid = data.get('id', '')
dataset = DATASET_MAPPINGS.get(event_type, '')
context = data.get('description', '')
rule_deleted, state = disable_check(guid, disabled_rules, username, password)
if rule_deleted:
stats["disabled"] += 1
if state == "deleted":
stats["deleted"] += 1
continue
# Extract entries and build JSON
entries = extract_entries(data, event_type)
json_entry = build_json_entry(entries, guid, event_type, context)
# Process rule creation or update
status = process_rule_update_or_create(guid, json_entry, username, password)
stats[status] += 1
stats["rule_count"] += 1
return stats
def parse_args(argv):
try:
opts, args = getopt.getopt(argv, "i:d:c:f:", ["input=", "disabled=", "credentials=", "flags_file="])
except getopt.GetoptError:
print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
sys.exit(2)
return opts
def load_flags(file_path):
with open(file_path, 'r') as flags_file:
return flags_file.read().splitlines()
def validate_inputs(credentials_file, disabled_file, yaml_directories):
if not credentials_file or not disabled_file or not yaml_directories:
print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
sys.exit(2)
def main(argv):
credentials_file = ""
disabled_file = ""
yaml_directories = []
opts = parse_args(argv)
for opt, arg in opts:
if opt in ("-c", "--credentials"):
credentials_file = arg
elif opt in ("-d", "--disabled"):
disabled_file = arg
elif opt in ("-i", "--input"):
yaml_directories.append(arg)
elif opt in ("-f", "--flags_file"):
flags = load_flags(arg)
return main(argv + flags)
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logging.info(f"\n{timestamp}")
validate_inputs(credentials_file, disabled_file, yaml_directories)
credentials = load_credentials(credentials_file)
if not credentials:
raise Exception("Failed to load credentials")
username, password = extract_auth_details(credentials)
if not username or not password:
raise Exception("Invalid credentials format")
custom_rules_input = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw'
custom_rules_output = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters'
prepare_custom_rules(custom_rules_input, custom_rules_output)
disabled_rules = load_disabled(disabled_file)
total_stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
for yaml_dir in yaml_directories:
yaml_files = load_yaml_files(yaml_dir)
stats = process_rules(yaml_files, disabled_rules, username, password)
for key in total_stats:
total_stats[key] += stats[key]
logging.info(f"\nProcessing Summary")
logging.info(f" - Total processed rules: {total_stats['rule_count']}")
logging.info(f" - New rules: {total_stats['new']}")
logging.info(f" - Updated rules: {total_stats['updated']}")
logging.info(f" - Disabled rules: {total_stats['deleted']}")
logging.info(f" - Rules with no changes: {total_stats['no_change']}")
logging.info(f"Rule status Summary")
logging.info(f" - Active rules: {total_stats['rule_count'] - total_stats['disabled']}")
logging.info(f" - Disabled rules: {total_stats['disabled']}")
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logging.info(f"Execution completed at: {timestamp}")
if __name__ == "__main__":
main(sys.argv[1:])
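As a concrete trace: the dns_query filter template shown earlier in this commit (QueryName, 'end with', '.thawte.com') would pass through modify_pattern and extract_entries to produce these exception-list entries:
```
[
  {"field": "dns.question.name", "operator": "included", "type": "wildcard", "value": "*.thawte.com"},
  {"field": "event.dataset", "operator": "included", "type": "match", "value": "endpoint.events.network"}
]
```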

View File

@@ -102,6 +102,70 @@ elastic_fleet_package_is_installed() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
}
elastic_fleet_agent_policy_ids() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
}
elastic_fleet_agent_policy_names() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
}
elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_id() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
if [ $? -ne 0 ]; then
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
exit 1
fi
}
elastic_fleet_policy_create() {
NAME=$1

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-elastic-fleet-common
# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')
# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')
# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
printf "\nUpdating Policy: $POLICYNAME\n"
# First get the Integration ID
INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')
# Modify the default integration policy, updating the policy_id and name to match the target policy
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON
elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
done

View File

@@ -12,7 +12,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade
# Second, configure Elastic Defend Integration separately
# Second, update Fleet Server policies
/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-elastic-fleet-common
curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
if [ $? -ne 0 ]; then
echo "Error: Failed to connect to Kibana."
exit 1
fi
IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
for AGENT_POLICY in $agent_policies; do
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
for INTEGRATION in $integrations; do
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
# Get package name so we know what package to look for when checking the current and latest available version
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
# Get currently installed version of package
PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")
# Get latest available version of package
AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
# Get integration ID
INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
# Dry run of the upgrade
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
echo "Upgrading $INTEGRATION..."
echo "Starting dry run..."
DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
# If no errors with dry run, proceed with actual upgrade
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
echo "No errors detected. Proceeding with upgrade..."
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
if [ $? -ne 0 ]; then
echo "Error: Upgrade failed for integration ID '$INTEGRATION_ID'."
exit 1
fi
else
echo "Errors detected during dry run. Stopping upgrade..."
exit 1
fi
fi
fi
done
done
echo

View File

@@ -0,0 +1,128 @@
import hashlib
import os
import json
import yaml
import requests
from requests.auth import HTTPBasicAuth
import shutil
# Extract 'entries', 'description' and 'os_types' fields
def extract_relevant_fields(filter):
return {
'entries': filter.get('entries', []),
'description': filter.get('description', '')
}
# Sort for consistency, so that a hash can be generated
def sorted_data(value):
if isinstance(value, dict):
# Recursively sort the dictionary by key
return {k: sorted_data(v) for k, v in sorted(value.items())}
elif isinstance(value, list):
# Sort lists; dictionary elements are ordered by their sorted (key, value) pairs
return sorted(value, key=lambda x: tuple(sorted(x.items())) if isinstance(x, dict) else x)
return value
# Generate a hash based on sorted relevant fields
def generate_hash(data):
sorted_data_string = json.dumps(sorted_data(data), sort_keys=True)
return hashlib.sha256(sorted_data_string.encode('utf-8')).hexdigest()
# Load Elasticsearch credentials from the config file
def load_credentials(config_path):
with open(config_path, 'r') as file:
for line in file:
if line.startswith("user"):
credentials = line.split('=', 1)[1].strip().strip('"')
return credentials
return None
# Extract username and password from credentials
def extract_auth_details(credentials):
if ':' in credentials:
return credentials.split(':', 1)
return None, None
# Generalized API request function
def api_request(method, guid, username, password, json_data=None):
headers = {
'kbn-xsrf': 'true',
'Content-Type': 'application/json'
}
auth = HTTPBasicAuth(username, password)
if method == "POST":
url = "http://localhost:5601/api/exception_lists/items?namespace_type=agnostic"
else:
url = f"http://localhost:5601/api/exception_lists/items?item_id={guid}&namespace_type=agnostic"
response = requests.request(method, url, headers=headers, auth=auth, json=json_data)
if response.status_code in [200, 201]:
return response.json() if response.content else True
elif response.status_code == 404 and method == "GET":
return None
else:
print(f"Error with {method} request: {response.status_code} - {response.text}")
return False
# Load YAML data for GUIDs to skip
def load_disabled(disabled_file_path):
if os.path.exists(disabled_file_path):
with open(disabled_file_path, 'r') as file:
return yaml.safe_load(file) or {}
return {}
def load_yaml_files(*dirs):
yaml_files = []
for dir_path in dirs:
if os.path.isdir(dir_path):
# Recurse through the directory and subdirectories
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if file_name.endswith(".yaml"):
full_path = os.path.join(root, file_name)
with open(full_path, 'r') as f:
try:
yaml_content = yaml.safe_load(f)
yaml_files.append(yaml_content)
except yaml.YAMLError as e:
print(f"Error loading {full_path}: {e}")
else:
print(f"Invalid directory: {dir_path}")
return yaml_files
def prepare_custom_rules(input_file, output_dir):
# Clear the output directory first
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir, exist_ok=True)
try:
# Load the YAML file
with open(input_file, 'r') as f:
docs = yaml.safe_load_all(f)
for doc in docs:
if doc is None or 'id' not in doc:
print(f"Skipping rule, no 'id' found: {doc}")
continue
if doc.get('title') in ["Template 1", "Template 2"]:
print(f"Skipping template rule with title: {doc['title']}")
continue
# Create a filename using the 'id' field
file_name = os.path.join(output_dir, f"{doc['id']}.yaml")
# Write the individual YAML file
with open(file_name, 'w') as output_file:
yaml.dump(doc, output_file, default_flow_style=False)
print(f"Created file: {file_name}")
except yaml.YAMLError as e:
print(f"Error parsing YAML: {e}")
except Exception as e:
print(f"Error processing file: {e}")

View File

@@ -13,13 +13,16 @@
LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log"
# get the variables needed such as ELASTIC_AGENT_TARBALL_VERSION
get_elastic_agent_vars
# Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0
for i in {1..30}
do
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
done
@@ -36,6 +39,7 @@ printf "\n### Creating a temp directory at /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace
mkdir -p /nsm/elastic-agent-workspace
printf "\n### Extracting outer tarball and then each individual tarball/zip\n"
tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-agent-workspace/
unzip -q /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/

View File

@@ -5,6 +5,7 @@
# this file except in compliance with the Elastic License 2.0.
. /usr/sbin/so-common
{%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
# Only run on Managers
if ! is_manager_node; then
@@ -27,14 +28,14 @@ OUTDATED_LIST=$(jq -r '.items | map(.id) | (tojson)' <<< "$RAW_JSON")
if [ "$OUTDATED_LIST" != '[]' ]; then
AGENTNUMBERS=$(jq -r '.total' <<< "$RAW_JSON")
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic $ELASTIC_AGENT_TARBALL_VERSION...\n\n"
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"
# Generate updated JSON payload
JSON_STRING=$(jq -n --arg ELASTICVERSION $ELASTIC_AGENT_TARBALL_VERSION --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
# Update Node Agents
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
else
printf "No Agents need updates... Exiting\n\n"
exit 0
fi
fi

View File

@@ -21,64 +21,104 @@ function update_logstash_outputs() {
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
JSON_STRING=$(jq -n \
--argjson UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
# Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
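For clarity on the --argjson handoff above: .item.ssl comes back as a JSON object, and --argjson splices it into the new output policy unquoted rather than as an escaped string (the ssl blob below is fabricated):
```bash
SSL_CONFIG='{"certificate_authorities":["<ca pem>"],"verification_mode":"full"}'  # fabricated
jq -n --argjson SSL_CONFIG "$SSL_CONFIG" '{"name": "grid-kafka", "ssl": $SSL_CONFIG}'
```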
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi
{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_kafka" ]; then
printf "Failed to query for current Kafka Outputs..."
exit 1
fi
declare -a NEW_LIST=()
# Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=()
# Query for the current Grid Nodes that are running kafka
KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
# Query for Kafka nodes with Broker role and add hostname to list
while IFS= read -r line; do
NEW_LIST+=("$line")
done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi
# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=()
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}
# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}
# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
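A quick illustration of the jq idiom used here to turn a bash array into a canonical JSON array before hashing (hostnames invented):
```bash
NEW_LIST=("manager:5055" "receiver1:5055")
jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}"
# => ["manager:5055","receiver1:5055"]
```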
@@ -87,9 +127,28 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
# Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
printf "Checking if the correct output policy is set as default\n"
OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
printf "Default output policy needs to be updated.\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
else
printf "Default output policy is set - no update needed.\n"
fi
exit 0
else
printf "\nHashes don't match - update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
fi

View File

@@ -53,7 +53,8 @@ fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
### Create Outputs & Fleet URLs ###
### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
@@ -62,7 +63,21 @@ JSON_STRING=$( jq -n \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"
printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
@@ -77,6 +92,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
printf "\n\n"
{%- endif %}
printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
/usr/sbin/so-kafka-fleet-output-policy
{% endif %}
# Add Manager Hostname & URL Base to Fleet Host URLs
printf "\nAdd SO-Manager Fleet URL\n"
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
@@ -96,16 +116,6 @@ printf "\n\n"
# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load
# Manager Fleet Server Host
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"
#Temp Fixup for ES Output bug
JSON_STRING=$( jq -n \
--arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
'{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
@@ -160,4 +170,4 @@ salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0
exit 0

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
. /usr/sbin/so-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
exit 1
fi
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$output" | grep -q "so-manager_kafka"; then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
JSON_STRING=$( jq -n \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
)
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
exit 1
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
fi
elif echo "$output" | grep -q "so-manager_kafka"; then
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
echo -e "\nNo update required...\n"
{% endif %}

View File

@@ -4,7 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA

View File

@@ -1,23 +1,37 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes and fleet nodes are removed #}
{% set ES_LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #}
{% set ELASTICSEARCH_SEED_HOSTS = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type not in ['heavynode', 'fleet'] %}
{% if node_type != 'heavynode' %}
{% for hostname in node_data[node_type].keys() %}
{% do ES_LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
{% do ELASTICSEARCH_SEED_HOSTS.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% endfor %}
{# this is a list of dicts containing hostname:ip of all nodes running elasticsearch #}
{% set ELASTICSEARCH_NODES = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() %}
{% for hostname in node_data[node_type].keys() %}
{% do ELASTICSEARCH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endfor %}
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
{% if ES_LOGSTASH_NODES | length > 1 %}
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
{% for NODE in ES_LOGSTASH_NODES %}
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %}
{% endif %}

View File

@@ -118,6 +118,11 @@ esingestconf:
- user: 930
- group: 939
# Remove .fleet_final_pipeline-1 because we are using global@custom now
so-fleet-final-pipeline-remove:
file.absent:
- name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:

File diff suppressed because it is too large

View File

@@ -6,10 +6,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
so-elasticsearch_image:
docker_image.present:
- name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }}
- name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}
{% else %}

View File

@@ -7,8 +7,8 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ES_LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
@@ -19,7 +19,7 @@ include:
so-elasticsearch:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }}
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ ELASTICSEARCHMERGED.version }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -27,7 +27,7 @@ so-elasticsearch:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts:
{% for node in LOGSTASH_NODES %}
{% for node in ELASTICSEARCH_NODES %}
{% for hostname, ip in node.items() %}
- {{hostname}}:{{ip}}
{% endfor %}
@@ -38,7 +38,7 @@ so-elasticsearch:
{% endfor %}
{% endif %}
- environment:
{% if ES_LOGSTASH_NODES | length == 1 or GLOBALS.role == 'so-heavynode' %}
{% if ELASTICSEARCH_SEED_HOSTS | length == 1 or GLOBALS.role == 'so-heavynode' %}
- discovery.type=single-node
{% endif %}
- ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true

View File

@@ -62,6 +62,7 @@
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}
@@ -72,7 +73,9 @@
}
}
{%- endif %}
{%- raw %}
{%- raw %}
,
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
]
}
{% endraw %}

View File

@@ -1,106 +0,0 @@
{
"version": 3,
"_meta": {
"managed_by": "fleet",
"managed": true
},
"description": "Final pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{
"date": {
"description": "Add time when event was ingested (and remove sub-seconds to improve storage efficiency)",
"tag": "truncate-subseconds-event-ingested",
"field": "_ingest.timestamp",
"target_field": "event.ingested",
"formats": [
"ISO8601"
],
"output_format": "date_time_no_millis",
"ignore_failure": true
}
},
{
"remove": {
"description": "Remove any pre-existing untrusted values.",
"field": [
"event.agent_id_status",
"_security"
],
"ignore_missing": true
}
},
{
"set_security_user": {
"field": "_security",
"properties": [
"authentication_type",
"username",
"realm",
"api_key"
]
}
},
{
"script": {
"description": "Add event.agent_id_status based on the API key metadata and the agent.id contained in the event.\n",
"tag": "agent-id-status",
"source": "boolean is_user_trusted(def ctx, def users) {\n if (ctx?._security?.username == null) {\n return false;\n }\n\n def user = null;\n for (def item : users) {\n if (item?.username == ctx._security.username) {\n user = item;\n break;\n }\n }\n\n if (user == null || user?.realm == null || ctx?._security?.realm?.name == null) {\n return false;\n }\n\n if (ctx._security.realm.name != user.realm) {\n return false;\n }\n\n return true;\n}\n\nString verified(def ctx, def params) {\n // No agent.id field to validate.\n if (ctx?.agent?.id == null) {\n return \"missing\";\n }\n\n // Check auth metadata from API key.\n if (ctx?._security?.authentication_type == null\n // Agents only use API keys.\n || ctx._security.authentication_type != 'API_KEY'\n // Verify the API key owner before trusting any metadata it contains.\n || !is_user_trusted(ctx, params.trusted_users)\n // Verify the API key has metadata indicating the assigned agent ID.\n || ctx?._security?.api_key?.metadata?.agent_id == null) {\n return \"auth_metadata_missing\";\n }\n\n // The API key can only be used represent the agent.id it was issued to.\n if (ctx._security.api_key.metadata.agent_id != ctx.agent.id) {\n // Potential masquerade attempt.\n return \"mismatch\";\n }\n\n return \"verified\";\n}\n\nif (ctx?.event == null) {\n ctx.event = [:];\n}\n\nctx.event.agent_id_status = verified(ctx, params);",
"params": {
"trusted_users": [
{
"username": "elastic/fleet-server",
"realm": "_service_account"
},
{
"username": "cloud-internal-agent-server",
"realm": "found"
},
{
"username": "elastic",
"realm": "reserved"
}
]
}
}
},
{
"remove": {
"field": "_security",
"ignore_missing": true
}
},
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
{ "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [
{
"remove": {
"field": "_security",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"append": {
"field": "error.message",
"value": [
"failed in Fleet agent final_pipeline: {{ _ingest.on_failure_message }}"
]
}
}
]
}

View File

@@ -0,0 +1,27 @@
{
"version": 3,
"_meta": {
"managed_by": "securityonion",
"managed": true
},
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
{ "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}
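To trace a document through a pipeline like this one, Elasticsearch's simulate API can be fed a fabricated event. The document below is invented, and this sketch assumes the curl.config credentials are also accepted by Elasticsearch on port 9200:
```bash
curl -sK /opt/so/conf/elasticsearch/curl.config -X POST \
  "localhost:9200/_ingest/pipeline/global@custom/_simulate" \
  -H 'Content-Type: application/json' -d '
{ "docs": [ { "_source": { "event": { "dataset": "system.syslog" }, "network": { "direction": "egress" } } } ] }' \
  | jq '.docs[0].doc._source | {event, tags, network}'
# Expect event.module == "system", tags == ["syslog"], network.initiated == "true"
```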

View File

@@ -1,5 +1,5 @@
{
"description": "Pipeline for pfSense",
"description": "Pipeline for PFsense",
"processors": [
{
"set": {

View File

@@ -1,9 +1,14 @@
{
"description": "Pipeline for parsing pfSense Suricata logs.",
"processors": [
{ "set": {
"field": "event.module",
"value": "suricata"
}
},
{
"pipeline": {
"name": "suricata.common"
"name": "suricata.common_pfsense"
}
}
],

View File

@@ -0,0 +1,414 @@
{
"description": "Pipeline for PFsense",
"processors": [
{
"set": {
"field": "ecs.version",
"value": "8.11.0"
}
},
{
"set": {
"field": "observer.vendor",
"value": "netgate"
}
},
{
"set": {
"field": "observer.type",
"value": "firewall"
}
},
{
"rename": {
"field": "message",
"target_field": "event.original",
"ignore_missing": true,
"if": "ctx.event?.original == null"
}
},
{
"set": {
"field": "event.kind",
"value": "event"
}
},
{
"set": {
"field": "event.timezone",
"value": "{{_tmp.tz_offset}}",
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
}
},
{
"grok": {
"description": "Parse syslog header",
"field": "event.original",
"patterns": [
"^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
],
"pattern_definitions": {
"ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
"TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
"BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
"BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
"NAME": "[[[:alnum:]]_-]+",
"SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
"TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
"OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
"UNIXPATH": "(/([\\w_%!$@:.,+~-]+|\\\\.)*)*",
"PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH})%{BASEPATH:process.name})",
"BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
"META": "\\[[^\\]]*\\]"
}
}
},
{
"date": {
"if": "ctx._tmp.timestamp8601 != null",
"field": "_tmp.timestamp8601",
"target_field": "@timestamp",
"formats": [
"ISO8601"
]
}
},
{
"date": {
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
"field": "_tmp.timestamp",
"target_field": "@timestamp",
"formats": [
"MMM d HH:mm:ss",
"MMM d HH:mm:ss",
"MMM dd HH:mm:ss"
],
"timezone": "{{ event.timezone }}"
}
},
{
"grok": {
"description": "Set Event Provider",
"field": "process.name",
"patterns": [
"^%{HYPHENATED_WORDS:event.provider}"
],
"pattern_definitions": {
"HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
}
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-firewall",
"if": "ctx.event.provider == 'filterlog'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-openvpn",
"if": "ctx.event.provider == 'openvpn'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-ipsec",
"if": "ctx.event.provider == 'charon'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-dhcp",
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-unbound",
"if": "ctx.event.provider == 'unbound'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-haproxy",
"if": "ctx.event.provider == 'haproxy'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-php-fpm",
"if": "ctx.event.provider == 'php-fpm'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.19.1-squid",
"if": "ctx.event.provider == 'squid'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-suricata",
"if": "ctx.event.provider == 'suricata'"
}
},
{
"drop": {
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
}
},
{
"append": {
"field": "event.category",
"value": "network",
"if": "ctx.network != null"
}
},
{
"convert": {
"field": "source.address",
"target_field": "source.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"convert": {
"field": "destination.address",
"target_field": "destination.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"set": {
"field": "network.type",
"value": "ipv6",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
}
},
{
"set": {
"field": "network.type",
"value": "ipv4",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
}
},
{
"geoip": {
"field": "source.ip",
"target_field": "source.geo",
"ignore_missing": true
}
},
{
"geoip": {
"field": "destination.ip",
"target_field": "destination.geo",
"ignore_missing": true
}
},
{
"geoip": {
"ignore_missing": true,
"database_file": "GeoLite2-ASN.mmdb",
"field": "source.ip",
"target_field": "source.as",
"properties": [
"asn",
"organization_name"
]
}
},
{
"geoip": {
"database_file": "GeoLite2-ASN.mmdb",
"field": "destination.ip",
"target_field": "destination.as",
"properties": [
"asn",
"organization_name"
],
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.asn",
"target_field": "source.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.organization_name",
"target_field": "source.as.organization.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.asn",
"target_field": "destination.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.organization_name",
"target_field": "destination.as.organization.name",
"ignore_missing": true
}
},
{
"community_id": {
"target_field": "network.community_id",
"ignore_failure": true
}
},
{
"grok": {
"field": "observer.ingress.interface.name",
"patterns": [
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"set": {
"field": "network.vlan.id",
"copy_from": "observer.ingress.vlan.id",
"ignore_empty_value": true
}
},
{
"append": {
"field": "related.ip",
"value": "{{destination.ip}}",
"allow_duplicates": false,
"if": "ctx.destination?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.nat.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.nat?.ip != null"
}
},
{
"append": {
"field": "related.hosts",
"value": "{{destination.domain}}",
"if": "ctx.destination?.domain != null"
}
},
{
"append": {
"field": "related.user",
"value": "{{user.name}}",
"if": "ctx.user?.name != null"
}
},
{
"set": {
"field": "network.direction",
"value": "{{network.direction}}bound",
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
}
},
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"script": {
"lang": "painless",
"description": "This script processor iterates over the whole document to remove fields with null values.",
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
}
},
{
"remove": {
"field": "event.original",
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "global@custom",
"ignore_missing_pipeline": true,
"description": "[Fleet] Global pipeline for all data streams"
}
},
{
"pipeline": {
"name": "logs@custom",
"ignore_missing_pipeline": true,
"description": "[Fleet] Pipeline for all data streams of type `logs`"
}
},
{
"pipeline": {
"name": "logs-pfsense.integration@custom",
"ignore_missing_pipeline": true,
"description": "[Fleet] Pipeline for all data streams of type `logs` defined by the `pfsense` integration"
}
},
{
"pipeline": {
"name": "logs-pfsense.log@custom",
"ignore_missing_pipeline": true,
"description": "[Fleet] Pipeline for the `pfsense.log` dataset"
}
}
],
"on_failure": [
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}

View File

@@ -1,6 +1,7 @@
{
"description" : "suricata.alert",
"processors" : [
{ "set": { "field": "_index", "value": "logs-suricata.alerts-so" } },
{ "set": { "field": "tags","value": "alert" }},
{ "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },

View File

@@ -0,0 +1,16 @@
{
"description" : "suricata.alert",
"processors" : [
{ "set": { "field": "data_stream.dataset", "value": "suricata" } },
{ "set": { "field": "data_stream.namespace", "value": "so" } },
{ "set": { "field": "_index", "value": "logs-suricata.alerts-so" } },
{ "set": { "field": "tags","value": "alert" }},
{ "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
{ "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
{ "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
{ "pipeline": { "name": "common.nids" } }
]
}

View File

@@ -0,0 +1,23 @@
{
"description" : "suricata.common",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
{ "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
{ "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
{ "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
{ "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
{ "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
{ "remove":{ "field": "agent", "ignore_failure": true } },
{ "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}_pfsense" } }
]
}
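Note the final processor dispatches on the parsed EVE event type, so an alert record flows into suricata.alert_pfsense (seemingly the new alerts pipeline shown above). A fabricated one-line EVE record for tracing that path:
```bash
MSG='{"event_type":"alert","proto":"TCP","src_ip":"10.0.0.5","dest_ip":"10.0.0.9"}'  # fabricated
jq -r '.event_type' <<< "$MSG"   # => alert, so the record is routed to "suricata.alert_pfsense"
```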

View File

@@ -1,7 +1,13 @@
elasticsearch:
enabled:
description: You can enable or disable Elasticsearch.
description: Enables or disables the Elasticsearch process. This process provides the log event storage system. WARNING - Disabling this process is unsupported.
advanced: True
helpLink: elasticsearch.html
version:
description: "This specifies the version of the following containers: so-elastic-fleet-package-registry, so-elastic-agent, so-elastic-fleet, so-kibana, so-logstash and so-elasticsearch. Modifying this value in the Elasticsearch defaults.yaml will result in catastrophic grid failure."
readonly: True
global: True
advanced: True
esheap:
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
helpLink: elasticsearch.html
@@ -376,6 +382,7 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-barracuda_cloudgen_firewall_x_log: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
@@ -430,6 +437,7 @@ elasticsearch:
so-logs-httpjson_x_generic: *indexSettings
so-logs-iis_x_access: *indexSettings
so-logs-iis_x_error: *indexSettings
so-logs-imperva_cloud_waf_x_event: *indexSettings
so-logs-juniper_x_junos: *indexSettings
so-logs-juniper_x_netscreen: *indexSettings
so-logs-juniper_x_srx: *indexSettings
@@ -466,6 +474,13 @@ elasticsearch:
so-logs-sonicwall_firewall_x_log: *indexSettings
so-logs-snort_x_log: *indexSettings
so-logs-symantec_endpoint_x_log: *indexSettings
so-logs-tenable_io_x_asset: *indexSettings
so-logs-tenable_io_x_plugin: *indexSettings
so-logs-tenable_io_x_scan: *indexSettings
so-logs-tenable_io_x_vulnerability: *indexSettings
so-logs-tenable_sc_x_asset: *indexSettings
so-logs-tenable_sc_x_plugin: *indexSettings
so-logs-tenable_sc_x_vulnerability: *indexSettings
so-logs-ti_abusech_x_malware: *indexSettings
so-logs-ti_abusech_x_malwarebazaar: *indexSettings
so-logs-ti_abusech_x_threatfox: *indexSettings
@@ -521,6 +536,7 @@ elasticsearch:
so-endgame: *indexSettings
so-idh: *indexSettings
so-suricata: *indexSettings
so-suricata_x_alerts: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
@@ -529,6 +545,58 @@ elasticsearch:
so-strelka: *indexSettings
so-syslog: *indexSettings
so-zeek: *indexSettings
so-metrics-fleet_server_x_agent_status: &fleetMetricsSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
advanced: True
readonly: True
helpLink: elasticsearch.html
index_template:
ignore_missing_component_templates:
description: Ignore component templates if they aren't in Elasticsearch.
advanced: True
readonly: True
helpLink: elasticsearch.html
index_patterns:
description: Patterns for matching multiple indices or tables.
advanced: True
readonly: True
helpLink: elasticsearch.html
template:
settings:
index:
mode:
description: Type of mode used for this index. Time series indices can be used for metrics to reduce necessary storage.
advanced: True
readonly: True
helpLink: elasticsearch.html
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protect against data loss, but also increase storage costs.
advanced: True
readonly: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
advanced: True
readonly: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
advanced: True
readonly: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
advanced: True
readonly: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
advanced: True
readonly: True
helpLink: elasticsearch.html
so-metrics-fleet_server_x_agent_versions: *fleetMetricsSettings
so_roles:
so-manager: &soroleSettings
config:

View File

@@ -1,3 +1,8 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
{% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}
@@ -17,10 +22,26 @@
{% set ES_INDEX_SETTINGS = {} %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
{# if policy isn't defined in the original index settings, then don't merge policy from the global_overrides #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM-managed indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{# prevent this action from being performed on custom-defined indices. #}
{# a custom-defined index is not present in either of the dictionaries and fails to render. #}
{% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}
{# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM-managed indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %}
{# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
{% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
{% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% if settings.index_template is defined %}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,36 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"destination": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}
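
Mapping these fields as `ip` rather than `keyword` is what enables subnet-aware searches: a `term` query on an `ip` field accepts CIDR notation. A hedged illustration follows, assuming an index built from one of the templates above; the index pattern, host, and credentials are placeholders.

```python
# Sketch: CIDR search made possible by the `ip` mapping above.
# Index pattern, endpoint, and credentials are assumptions for illustration.
import requests

query = {"query": {"term": {"source.ip": "10.0.0.0/8"}}}  # CIDR works on ip fields
resp = requests.get(
    "https://localhost:9200/logs-*/_search",   # assumed index pattern
    json=query,
    auth=("elastic", "changeme"),              # placeholder credentials
    verify=False,
)
resp.raise_for_status()
print(resp.json()["hits"]["total"])            # match count across the subnet
```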

Some files were not shown because too many files have changed in this diff.