Merge branch '2.4/main' of github.com:Security-Onion-Solutions/securityonion into 2.4/main

Mike Reeves
2024-06-21 14:06:51 -04:00
174 changed files with 4240 additions and 4108 deletions

.github/workflows/close-threads.yml

@@ -0,0 +1,33 @@
name: 'Close Threads'
on:
schedule:
- cron: '50 1 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
close-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: -1
days-before-issue-close: 60
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
days-before-pr-stale: 45
days-before-pr-close: 60
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR, re-open it before it is locked."


@@ -2,7 +2,7 @@ name: 'Lock Threads'
on:
schedule:
- cron: '50 1 * * *'
- cron: '50 2 * * *'
workflow_dispatch:
permissions:
@@ -14,24 +14,8 @@ concurrency:
group: lock-threads
jobs:
close-threads:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: -1
days-before-issue-close: 60
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
days-before-pr-stale: 45
days-before-pr-close: 60
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR, re-open it before it is locked."
lock-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
steps:
- uses: jertel/lock-threads@main


@@ -1,17 +1,17 @@
### 2.4.60-20240320 ISO image released on 2024/03/20
### 2.4.70-20240529 ISO image released on 2024/05/29
### Download and Verify
2.4.60-20240320 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
2.4.70-20240529 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
MD5: 178DD42D06B2F32F3870E0C27219821E
SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D
SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06
MD5: 8FCCF31C2470D1ABA380AF196B611DEC
SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B
SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso
gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013
gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.


@@ -8,19 +8,22 @@ Alerts
![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
Dashboards
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png)
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png)
Hunt
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png)
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png)
Detections
![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png)
PCAP
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png)
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png)
Grid
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png)
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png)
Config
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png)
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png)
### Release Notes


@@ -1 +1 @@
2.4.60
2.4.70


@@ -43,8 +43,6 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- kratos.soc_kratos
@@ -61,8 +59,6 @@ base:
- elastalert.adv_elastalert
- backup.soc_backup
- backup.adv_backup
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
@@ -108,8 +104,6 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -165,8 +159,6 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -262,8 +254,6 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- backup.soc_backup


@@ -34,7 +34,6 @@
'suricata',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean'
],
@@ -66,6 +65,7 @@
'registry',
'manager',
'nginx',
'strelka.manager',
'soc',
'kratos',
'influxdb',
@@ -92,6 +92,7 @@
'nginx',
'telegraf',
'influxdb',
'strelka.manager',
'soc',
'kratos',
'elasticfleet',
@@ -101,7 +102,6 @@
'suricata.manager',
'utility',
'schedule',
'soctopus',
'docker_clean',
'stig'
],
@@ -113,6 +113,7 @@
'nginx',
'telegraf',
'influxdb',
'strelka.manager',
'soc',
'kratos',
'elastic-fleet-package-registry',
@@ -123,7 +124,6 @@
'suricata.manager',
'utility',
'schedule',
'soctopus',
'docker_clean',
'stig'
],
@@ -157,7 +157,6 @@
'healthcheck',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean',
'stig'
@@ -200,10 +199,6 @@
],
}, grain='role') %}
{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
{% do allowed_states.append('mysql') %}
{% endif %}
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('zeek') %}
{%- endif %}
@@ -229,10 +224,6 @@
{% do allowed_states.append('elastalert') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('playbook') %}
{% endif %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}


@@ -1,6 +1,6 @@
bpf:
pcap:
description: List of BPF filters to apply to PCAP.
description: List of BPF filters to apply to Stenographer.
multiline: True
forcedType: "[]string"
helpLink: bpf.html
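For reference, a minimal local pillar sketch of this setting (the filter expressions are hypothetical examples, not shipped defaults):

```
bpf:
  pcap:
    # Each list entry is a standard BPF expression applied to Stenographer.
    - not host 192.168.10.50
    - not port 443
```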


@@ -1,9 +1,16 @@
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}
remove_common_soup:
file.absent:
@@ -13,6 +20,8 @@ remove_common_so-firewall:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
# This section is used to put the scripts in place in the Salt file system
# in case a state run tries to overwrite what we do in the next section.
copy_so-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
@@ -41,6 +50,15 @@ copy_so-firewall_manager_tools_sbin:
- force: True
- preserve: True
copy_so-yaml_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin:
file.copy:
- name: /usr/sbin/so-common
@@ -68,3 +86,19 @@ copy_so-firewall_sbin:
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_sbin:
file.copy:
- name: /usr/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
cmd.run:
- name: curl -s -f -o /opt/so/saltstack/default/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}


@@ -5,8 +5,13 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
salt-call state.highstate -l info
cat << EOF
so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued, so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
https://docs.securityonion.net/en/2.4/salt.html
EOF
salt-call state.highstate -l info queue=True


@@ -179,6 +179,21 @@ copy_new_files() {
cd /tmp
}
create_local_directories() {
echo "Creating local pillar and salt directories if needed"
PILLARSALTDIR=$1
local_salt_dir="/opt/so/saltstack/local"
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -pv $local_salt_dir$suffixdir
fi
done
chown -R socore:socore $local_salt_dir/$i
done
}
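A hypothetical invocation, assuming the update checkout lives at the non-airgap path used elsewhere in this commit:

```
# Mirror the repo's pillar/ and salt/ directory trees into
# /opt/so/saltstack/local and chown them to socore (sketch only).
create_local_directories /tmp/sogh/securityonion
```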
disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
@@ -248,6 +263,14 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -329,7 +352,7 @@ lookup_salt_value() {
local=""
fi
salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}
lookup_pillar() {
@@ -570,8 +593,9 @@ sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)"
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() {


@@ -53,13 +53,10 @@ container_list() {
"so-kibana"
"so-kratos"
"so-logstash"
"so-mysql"
"so-nginx"
"so-pcaptools"
"so-playbook"
"so-redis"
"so-soc"
"so-soctopus"
"so-steno"
"so-strelka-backend"
"so-strelka-filestream"


@@ -49,10 +49,6 @@ if [ "$CONTINUE" == "y" ]; then
sed -i "s|$OLD_IP|$NEW_IP|g" $file
done
echo "Granting MySQL root user permissions on $NEW_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
echo "Removing MySQL root user from $OLD_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True


@@ -122,6 +122,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when a new node comes online
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -154,15 +155,11 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 placed a Jinja script in a non-Jinja salt dir, causing cron output errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
@@ -201,7 +198,13 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
fi
RESULT=0
@@ -210,7 +213,9 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging
exclude_container so-playbook # ignore due to several playbook known issues
exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -228,10 +233,14 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues
exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"


@@ -0,0 +1,98 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0."
set -e
# This script is intended to be used in case the ISO install did not properly set up TPM decryption for LUKS partitions at boot.
if [ -z $NOROOT ]; then
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
fi
ENROLL_TPM=N
while [[ $# -gt 0 ]]; do
case $1 in
--enroll-tpm)
ENROLL_TPM=Y
;;
*)
echo "Usage: $0 [options]"
echo ""
echo "where options are:"
echo " --enroll-tpm for when TPM enrollment was not selected during ISO install."
echo ""
exit 1
;;
esac
shift
done
check_for_tpm() {
echo -n "Checking for TPM: "
if [ -d /sys/class/tpm/tpm0 ]; then
echo -e "tpm0 found."
TPM="yes"
# Check if TPM is using sha1 or sha256
if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then
echo -e "TPM is using sha1.\n"
TPM_PCR="sha1"
elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then
echo -e "TPM is using sha256.\n"
TPM_PCR="sha256"
fi
else
echo -e "No TPM found.\n"
exit 1
fi
}
check_for_luks_partitions() {
echo "Checking for LUKS partitions"
for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do
echo "Found LUKS partition: $part"
LUKS_PARTITIONS+=("$part")
done
if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then
echo -e "No LUKS partitions found.\n"
exit 1
fi
echo ""
}
enroll_tpm_in_luks() {
read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE
echo ""
for part in "${LUKS_PARTITIONS[@]}"; do
echo "Enrolling TPM for LUKS device: /dev/$part"
if [ "$TPM_PCR" == "sha1" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
elif [ "$TPM_PCR" == "sha256" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
fi
done
}
regenerate_tpm_enrollment_token() {
for part in "${LUKS_PARTITIONS[@]}"; do
clevis luks regen -d /dev/$part -s 1 -q
done
}
check_for_tpm
check_for_luks_partitions
if [[ $ENROLL_TPM == "Y" ]]; then
enroll_tpm_in_luks
else
regenerate_tpm_enrollment_token
fi
echo "Running dracut"
dracut -fv
echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n"
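Assuming the script is installed as so-tpm-luks-fix (the name is hypothetical; it is not stated in this commit), usage would look like:

```
# Regenerate the existing TPM enrollment token (default behavior)
sudo ./so-tpm-luks-fix

# Enroll the TPM for the first time, if enrollment was skipped during ISO install
sudo ./so-tpm-luks-fix --enroll-tpm
```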


@@ -89,6 +89,7 @@ function suricata() {
-v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \
-v "$PCAP:/input.pcap:ro" \
-v /dev/null:/nsm/suripcap:rw \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
@@ -247,7 +248,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
status "Import complete!"
status


@@ -67,13 +67,6 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx':
final_octet: 31
port_bindings:
@@ -91,13 +84,6 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-playbook':
final_octet: 32
port_bindings:
- 0.0.0.0:3000:3000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-redis':
final_octet: 33
port_bindings:
@@ -118,13 +104,6 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
@@ -201,6 +180,8 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits:
- memlock=524288000
'so-zeek':
final_octet: 99
custom_bind_mounts: []


@@ -46,14 +46,11 @@ docker:
so-kibana: *dockerOptions
so-kratos: *dockerOptions
so-logstash: *dockerOptions
so-mysql: *dockerOptions
so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions
so-playbook: *dockerOptions
so-redis: *dockerOptions
so-sensoroni: *dockerOptions
so-soc: *dockerOptions
so-soctopus: *dockerOptions
so-strelka-backend: *dockerOptions
so-strelka-filestream: *dockerOptions
so-strelka-frontend: *dockerOptions
@@ -66,5 +63,41 @@ docker:
so-elastic-agent: *dockerOptions
so-telegraf: *dockerOptions
so-steno: *dockerOptions
so-suricata: *dockerOptions
so-suricata:
final_octet:
description: Last octet of the container IP address.
helpLink: docker.html
readonly: True
advanced: True
global: True
port_bindings:
description: List of port bindings for the container.
helpLink: docker.html
advanced: True
multiline: True
forcedType: "[]string"
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
ulimits:
description: Ulimits for the container, as name=value pairs; memlock is specified in bytes.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions
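With the annotation above, the ulimit can be overridden from the local pillar; a sketch using the shipped default value:

```
docker:
  so-suricata:
    ulimits:
      - memlock=524288000
```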


@@ -82,6 +82,36 @@ elastasomodulesync:
- group: 933
- makedirs: True
elastacustomdir:
file.directory:
- name: /opt/so/conf/elastalert/custom
- user: 933
- group: 933
- makedirs: True
elastacustomsync:
file.recurse:
- name: /opt/so/conf/elastalert/custom
- source: salt://elastalert/files/custom
- user: 933
- group: 933
- makedirs: True
- file_mode: 660
- show_changes: False
elastapredefinedsync:
file.recurse:
- name: /opt/so/conf/elastalert/predefined
- source: salt://elastalert/files/predefined
- user: 933
- group: 933
- makedirs: True
- template: jinja
- file_mode: 660
- context:
elastalert: {{ ELASTALERTMERGED }}
- show_changes: False
elastaconf:
file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml


@@ -1,5 +1,6 @@
elastalert:
enabled: False
alerter_parameters: ""
config:
rules_folder: /opt/elastalert/rules/
scan_subdirectories: true


@@ -30,6 +30,8 @@ so-elastalert:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
- /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}


@@ -0,0 +1 @@
THIS IS A PLACEHOLDER FILE


@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PlaybookESAlerter(Alerter):
"""
Use matched data to create alerts in elasticsearch
"""
required_options = set(['play_title','play_url','sigma_level'])
def alert(self, matches):
for match in matches:
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'PlaybookESAlerter'}


@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class SecurityOnionESAlerter(Alerter):
"""
Use matched data to create alerts in Elasticsearch.
"""
required_options = set(['detection_title', 'sigma_level'])
optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']
def alert(self, matches):
for match in matches:
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
# Start building the rule dict
rule_info = {
"name": self.rule['detection_title'],
"uuid": self.rule['detection_public_id']
}
# Add optional fields if they are present in the rule
for field in self.optional_fields:
rule_key = field.split('_')[-1] # Assumes field format "sigma_<key>"
if field in self.rule:
rule_info[rule_key] = self.rule[field]
# Construct the payload with the conditional rule_info
payload = {
"tags": "alert",
"rule": rule_info,
"event": {
"severity": self.rule['event.severity'],
"module": self.rule['event.module'],
"dataset": self.rule['event.dataset'],
"severity_label": self.rule['sigma_level']
},
"sigma_level": self.rule['sigma_level'],
"event_data": match,
"@timestamp": timestamp
}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'SecurityOnionESAlerter'}
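A minimal ElastAlert 2 rule wired to this alerter might look like the sketch below; the module path and all field values are assumptions for illustration, not taken from this commit:

```
# Hypothetical rule file exercising SecurityOnionESAlerter
name: example-detection
type: any
index: logs-*
alert:
  - "modules.so.SecurityOnionESAlerter"   # module path is a guess
detection_title: Example Detection
detection_public_id: 11111111-2222-3333-4444-555555555555
sigma_level: high
event.severity: 3
event.module: sigma
event.dataset: sigma.alert
```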


@@ -0,0 +1,6 @@
{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %}
user: {{ elastalert.jira_user }}
password: {{ elastalert.jira_pass }}
{% else %}
apikey: {{ elastalert.get('jira_api_key', '') }}
{% endif %}


@@ -0,0 +1,2 @@
user: {{ elastalert.get('smtp_user', '') }}
password: {{ elastalert.get('smtp_pass', '') }}


@@ -13,3 +13,19 @@
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
{% if 'ntf' in salt['pillar.get']('features', []) %}
{% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %}
{% if params != None and params | length > 0 %}
{% do ELASTALERTMERGED.config.update(params) %}
{% endif %}
{% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %}
{% endif %}
{% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_api_key', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %}
{% endif %}
{% endif %}
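As an example of what alerter_parameters might contain once merged here, a Slack alerter configuration (the webhook URL is a placeholder; see the ElastAlert 2 docs for valid keys):

```
slack_webhook_url: "https://hooks.slack.com/services/T000/B000/XXXXXXXX"
slack_username_override: "Security Onion"
```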


@@ -2,6 +2,99 @@ elastalert:
enabled:
description: You can enable or disable Elastalert.
helpLink: elastalert.html
alerter_parameters:
title: Alerter Parameters
description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: elastalert.html
forcedType: string
jira_api_key:
title: Jira API Key
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_pass:
title: Jira Password
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_user:
title: Jira Username
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
smtp_pass:
title: SMTP Password
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
smtp_user:
title: SMTP Username
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
files:
custom:
alertmanager_ca__crt:
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
gelf_ca__crt:
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post2_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
ms_teams_ca__crt:
description: Optional custom Certificate Authority for connecting to a Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
pagerduty_ca__crt:
description: Optional custom Certificate Authority for connecting to a PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
rocket_chat_ca__crt:
description: Optional custom Certificate Authority for connecting to a Rocket.Chat server. To utilize this custom file, the rocket_chat_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__crt:
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__key:
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
slack_ca__crt:
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
config:
disable_rules_on_error:
description: Disable rules on failure.


@@ -37,6 +37,7 @@ elasticfleet:
- azure
- barracuda
- carbonblack_edr
- cef
- checkpoint
- cisco_asa
- cisco_duo
@@ -118,3 +119,8 @@ elasticfleet:
base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m
limit: 100
kismet:
base_url: http://localhost:2501
poll_interval: 1m
api_key:
enabled_nodes: []


@@ -0,0 +1,36 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% raw %}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "kismet-logs",
"namespace": "so",
"description": "Kismet Logs",
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"data_stream.dataset": "kismet",
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
"request_method": "GET",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
"request_redirect_headers_ban_list": [],
"oauth_scopes": [],
"processors": "",
"tags": [],
"pipeline": "kismet.common"
}
}
}
}
},
"force": true
}
{% endraw %}
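The request this integration issues can be reproduced outside of Fleet for testing; a sketch with a placeholder key:

```
# Devices active in the last 600 seconds, authenticated via the KISMET cookie
curl -s --cookie "KISMET=YOUR_API_KEY" \
  "http://localhost:2501/devices/last-time/-600/devices.tjson"
```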


@@ -0,0 +1,29 @@
{
"package": {
"name": "winlog",
"version": ""
},
"name": "windows-defender",
"namespace": "default",
"description": "Windows Defender - Operational logs",
"policy_id": "endpoints-initial",
"inputs": {
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",
"data_stream.dataset": "winlog.winlog",
"preserve_original_event": false,
"providers": [],
"ignore_older": "72h",
"language": 0,
"tags": [] }
}
}
}
},
"force": true
}


@@ -0,0 +1,35 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"namespace": "so",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"tags": [
"so-soc"
],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
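For readability, the escaped processors string above unpacks to the following Beats processor list (same content, unescaped):

```
- decode_json_fields:
    fields: ["message"]
    target: "soc"
    process_array: true
    max_depth: 2
    add_error_key: true
- add_fields:
    target: event
    fields:
      category: host
      module: soc
      dataset_temp: detections
- rename:
    fields:
      - from: "soc.fields.sourceIp"
        to: "source.ip"
      - from: "soc.fields.status"
        to: "http.response.status_code"
      - from: "soc.fields.method"
        to: "http.request.method"
      - from: "soc.fields.path"
        to: "url.path"
      - from: "soc.message"
        to: "event.action"
      - from: "soc.level"
        to: "log.level"
    ignore_missing: true
```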


@@ -16,6 +16,9 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -25,6 +28,9 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}


@@ -16,6 +16,9 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -25,6 +28,9 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}


@@ -79,3 +79,29 @@ elasticfleet:
helpLink: elastic-fleet.html
advanced: True
forcedType: int
kismet:
base_url:
description: Base URL for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for wireless device data from Kismet. The integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
api_key:
description: API key for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
enabled_nodes:
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"


@@ -72,5 +72,5 @@ do
printf "\n### $GOOS/$GOARCH Installer Generated...\n"
done
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace

File diff suppressed because it is too large


@@ -1,22 +1,15 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- mysql.sostatus
so-mysql:
docker_container.absent:
- force: True
so-mysql_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-mysql$
so-elasticsearch_image:
docker_image.present:
- name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }}
{% else %}


@@ -200,9 +200,15 @@ so-elasticsearch-roles-load:
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
so-elasticsearch-indices-delete:
cron.present:
cron.{{ap}}:
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
- identifier: so-elasticsearch-indices-delete
- user: root
@@ -211,7 +217,8 @@ so-elasticsearch-indices-delete:
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
{% endif %}
{% endif %}
{% else %}


@@ -57,10 +57,11 @@
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}


@@ -68,7 +68,7 @@
"field": "_security",
"ignore_missing": true
}
},
},
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
@@ -80,9 +80,10 @@
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [


@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "network.wireless.bssid"
}
}
]
}


@@ -0,0 +1,50 @@
{
"processors": [
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked",
"target_field": "network.wireless.ssid_cloaked",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid",
"target_field": "network.wireless.ssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null"
}
},
{
"set": {
"field": "network.wireless.ssid",
"value": "Hidden",
"if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc",
"target_field": "network.wireless.channel_utilization",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}


@@ -0,0 +1,16 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
}
]
}


@@ -0,0 +1,29 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.last_connected_bssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_client_map",
"processor": {
"append": {
"field": "network.wireless.known_connected_bssid",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null"
}
}
]
}


@@ -0,0 +1,159 @@
{
"processors": [
{
"json": {
"field": "message",
"target_field": "message2"
}
},
{
"date": {
"field": "message2.kismet_device_base_mod_time",
"formats": [
"epoch_second"
],
"target_field": "@timestamp"
}
},
{
"set": {
"field": "event.category",
"value": "network"
}
},
{
"dissect": {
"field": "message2.kismet_device_base_type",
"pattern": "%{wifi} %{device_type}"
}
},
{
"lowercase": {
"field": "device_type"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.{{device_type}}"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.wds_ap",
"if": "ctx?.device_type == 'wds ap'"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.ad_hoc",
"if": "ctx?.device_type == 'ad-hoc'"
}
},
{
"set": {
"field": "event.module",
"value": "kismet"
}
},
{
"rename": {
"field": "message2.kismet_device_base_packets_tx_total",
"target_field": "source.packets"
}
},
{
"rename": {
"field": "message2.kismet_device_base_num_alerts",
"target_field": "kismet.alerts.count"
}
},
{
"rename": {
"field": "message2.kismet_device_base_channel",
"target_field": "network.wireless.channel",
"if": "ctx?.message2?.kismet_device_base_channel != ''"
}
},
{
"rename": {
"field": "message2.kismet_device_base_frequency",
"target_field": "network.wireless.frequency",
"if": "ctx?.message2?.kismet_device_base_frequency != 0"
}
},
{
"rename": {
"field": "message2.kismet_device_base_last_time",
"target_field": "kismet.last_seen"
}
},
{
"date": {
"field": "kismet.last_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.last_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_first_time",
"target_field": "kismet.first_seen"
}
},
{
"date": {
"field": "kismet.first_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.first_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_seenby",
"target_field": "kismet.seenby"
}
},
{
"foreach": {
"field": "kismet.seenby",
"processor": {
"pipeline": {
"name": "kismet.seenby"
}
}
}
},
{
"rename": {
"field": "message2.kismet_device_base_manuf",
"target_field": "device.manufacturer"
}
},
{
"pipeline": {
"name": "{{event.dataset}}"
}
},
{
"remove": {
"field": [
"message2",
"message",
"device_type",
"wifi",
"agent",
"host",
"event.created"
],
"ignore_failure": true
}
}
]
}
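To make the routing concrete: an abridged input record such as the one below (values illustrative, most fields omitted) would have its base type dissected into wifi='Wi-Fi' and device_type='AP', lowercased to 'ap', so event.dataset becomes kismet.ap and the final pipeline processor hands the document to the kismet.ap sub-pipeline.

```
{
  "kismet_device_base_type": "Wi-Fi AP",
  "kismet_device_base_macaddr": "AA:BB:CC:DD:EE:FF"
}
```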


@@ -0,0 +1,9 @@
{
"processors": [
{
"pipeline": {
"name": "kismet.client"
}
}
]
}


@@ -0,0 +1,52 @@
{
"processors": [
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_num_packets",
"target_field": "_ingest._value.packets_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_uuid",
"target_field": "_ingest._value.serial_number",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_first_time",
"target_field": "_ingest._value.first_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_last_time",
"target_field": "_ingest._value.last_seen",
"ignore_missing": true
}
},
{
"date": {
"field": "_ingest._value.first_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.first_seen",
"ignore_failure": true
}
},
{
"date": {
"field": "_ingest._value.last_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.last_seen",
"ignore_failure": true
}
}
]
}


@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
}
]
}


@@ -0,0 +1,22 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_commonname",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}


@@ -0,0 +1,389 @@
{
"description": "Pipeline for pfSense",
"processors": [
{
"set": {
"field": "ecs.version",
"value": "8.10.0"
}
},
{
"set": {
"field": "observer.vendor",
"value": "netgate"
}
},
{
"set": {
"field": "observer.type",
"value": "firewall"
}
},
{
"rename": {
"field": "message",
"target_field": "event.original"
}
},
{
"set": {
"field": "event.kind",
"value": "event"
}
},
{
"set": {
"field": "event.timezone",
"value": "{{_tmp.tz_offset}}",
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
}
},
{
"grok": {
"description": "Parse syslog header",
"field": "event.original",
"patterns": [
"^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
],
"pattern_definitions": {
"ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
"TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
"BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
"BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
"NAME": "[[[:alnum:]]_-]+",
"SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
"TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
"OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
"PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
"BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
"META": "\\[[^\\]]*\\]"
}
}
},
{
"date": {
"if": "ctx._tmp.timestamp8601 != null",
"field": "_tmp.timestamp8601",
"target_field": "@timestamp",
"formats": [
"ISO8601"
]
}
},
{
"date": {
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
"field": "_tmp.timestamp",
"target_field": "@timestamp",
"formats": [
"MMM d HH:mm:ss",
"MMM d HH:mm:ss",
"MMM dd HH:mm:ss"
],
"timezone": "{{ event.timezone }}"
}
},
{
"grok": {
"description": "Set Event Provider",
"field": "process.name",
"patterns": [
"^%{HYPHENATED_WORDS:event.provider}"
],
"pattern_definitions": {
"HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
}
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-firewall",
"if": "ctx.event.provider == 'filterlog'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-openvpn",
"if": "ctx.event.provider == 'openvpn'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-ipsec",
"if": "ctx.event.provider == 'charon'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-dhcp",
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-unbound",
"if": "ctx.event.provider == 'unbound'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-haproxy",
"if": "ctx.event.provider == 'haproxy'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-php-fpm",
"if": "ctx.event.provider == 'php-fpm'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-squid",
"if": "ctx.event.provider == 'squid'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-suricata",
"if": "ctx.event.provider == 'suricata'"
}
},
{
"drop": {
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
}
},
{
"append": {
"field": "event.category",
"value": "network",
"if": "ctx.network != null"
}
},
{
"convert": {
"field": "source.address",
"target_field": "source.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"convert": {
"field": "destination.address",
"target_field": "destination.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"set": {
"field": "network.type",
"value": "ipv6",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
}
},
{
"set": {
"field": "network.type",
"value": "ipv4",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
}
},
{
"geoip": {
"field": "source.ip",
"target_field": "source.geo",
"ignore_missing": true
}
},
{
"geoip": {
"field": "destination.ip",
"target_field": "destination.geo",
"ignore_missing": true
}
},
{
"geoip": {
"ignore_missing": true,
"database_file": "GeoLite2-ASN.mmdb",
"field": "source.ip",
"target_field": "source.as",
"properties": [
"asn",
"organization_name"
]
}
},
{
"geoip": {
"database_file": "GeoLite2-ASN.mmdb",
"field": "destination.ip",
"target_field": "destination.as",
"properties": [
"asn",
"organization_name"
],
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.asn",
"target_field": "source.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.organization_name",
"target_field": "source.as.organization.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.asn",
"target_field": "destination.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.organization_name",
"target_field": "destination.as.organization.name",
"ignore_missing": true
}
},
{
"community_id": {
"target_field": "network.community_id",
"ignore_failure": true
}
},
{
"grok": {
"field": "observer.ingress.interface.name",
"patterns": [
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"set": {
"field": "network.vlan.id",
"copy_from": "observer.ingress.vlan.id",
"ignore_empty_value": true
}
},
{
"append": {
"field": "related.ip",
"value": "{{destination.ip}}",
"allow_duplicates": false,
"if": "ctx.destination?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.nat.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.nat?.ip != null"
}
},
{
"append": {
"field": "related.hosts",
"value": "{{destination.domain}}",
"if": "ctx.destination?.domain != null"
}
},
{
"append": {
"field": "related.user",
"value": "{{user.name}}",
"if": "ctx.user?.name != null"
}
},
{
"set": {
"field": "network.direction",
"value": "{{network.direction}}bound",
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
}
},
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"script": {
"lang": "painless",
"description": "This script processor iterates over the whole document to remove fields with null values.",
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
}
},
{
"remove": {
"field": "event.original",
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "logs-pfsense.log@custom",
"ignore_missing_pipeline": true
}
}
],
"on_failure": [
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}
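
Individual steps of a large pipeline like this can be sanity-checked with the inline form of the simulate API. A sketch for the network.direction suffixing processor, assuming the same local connection conventions used elsewhere in this diff:

curl -K /opt/so/conf/elasticsearch/curl.config -s -k -XPOST \
  "https://localhost:9200/_ingest/pipeline/_simulate" \
  -H 'Content-Type: application/json' -d '
{
  "pipeline": { "processors": [ { "set": {
    "field": "network.direction",
    "value": "{{network.direction}}bound",
    "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
  } } ] },
  "docs": [ { "_source": { "network": { "direction": "in" } } },
            { "_source": { "network": { "direction": "out" } } } ]
}'
# Expect network.direction to become "inbound" and "outbound" respectively.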

View File

@@ -0,0 +1,31 @@
{
"description": "Pipeline for parsing pfSense Suricata logs.",
"processors": [
{
"pipeline": {
"name": "suricata.common"
}
}
],
"on_failure": [
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}

View File

@@ -56,6 +56,7 @@
{ "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }},
{ "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }},
{ "set": { "if": "ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }},
{ "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }},
{ "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }},
{ "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } },
{ "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}},

View File

@@ -13,7 +13,6 @@
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
{ "lowercase": { "field": "network.transport", "ignore_failure": true } },
{ "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },

View File

@@ -27,7 +27,8 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata"
"view_index_metadata",
"write"
]
}
],

View File

@@ -13,7 +13,8 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata"
"view_index_metadata",
"write"
]
}
],

View File

@@ -5,6 +5,10 @@ elasticsearch:
esheap:
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
helpLink: elasticsearch.html
index_clean:
description: Determines whether indices should be considered for deletion based on available disk space in the cluster. Otherwise, indices are only deleted according to the age defined in the ILM settings.
forcedType: bool
helpLink: elasticsearch.html
retention:
retention_pct:
description: Total percentage of space used by Elasticsearch for multi-node clusters
@@ -98,10 +102,6 @@ elasticsearch:
policy:
phases:
hot:
max_age:
description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
@@ -120,7 +120,9 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
helpLink: elasticsearch.html
actions:
@@ -131,8 +133,8 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^\[0-9\]{1,5}d$
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
@@ -145,6 +147,8 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
@@ -271,7 +275,9 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -296,7 +302,9 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -311,6 +319,8 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -366,6 +376,7 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings
@@ -383,6 +394,7 @@ elasticsearch:
so-logs-darktrace_x_ai_analyst_alert: *indexSettings
so-logs-darktrace_x_model_breach_alert: *indexSettings
so-logs-darktrace_x_system_status_alert: *indexSettings
so-logs-detections_x_alerts: *indexSettings
so-logs-f5_bigip_x_log: *indexSettings
so-logs-fim_x_event: *indexSettings
so-logs-fortinet_x_clientendpoint: *indexSettings
@@ -511,6 +523,7 @@ elasticsearch:
so-suricata: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
so-logstash: *indexSettings
so-redis: *indexSettings
so-strelka: *indexSettings
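
These ILM settings are normally overridden per grid through the local pillar rather than by editing the defaults. A hypothetical sketch using the so-yaml.py helper that appears later in this diff; the pillar path and key below are assumptions for illustration, not taken from this commit:

# Move so-logs indices to the warm tier after 30 days; the value must match ^[0-9]{1,5}d$.
so-yaml.py replace /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls \
  elasticsearch.index_settings.global_overrides.policy.phases.warm.min_age 30d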

View File

@@ -2,11 +2,9 @@
{% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}
{% set PILLAR_GLOBAL_OVERRIDES = {} %}
{% if salt['pillar.get']('elasticsearch:index_settings') is defined %}
{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings') %}
{% if ES_INDEX_PILLAR.global_overrides is defined %}
{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %}
{% endif %}
{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings', {}) %}
{% if ES_INDEX_PILLAR.global_overrides is defined %}
{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %}
{% endif %}
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
@@ -19,6 +17,12 @@
{% set ES_INDEX_SETTINGS = {} %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
{# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %}
{% if settings.index_template is defined %}
{% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %}
{% do settings.index_template.template.settings.index.pop('sort') %}

View File

@@ -0,0 +1,36 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"device": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"manufacturer": {
"ignore_above": 1024,
"type": "keyword"
},
"model": {
"properties": {
"identifier": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
}
}
}
}
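
Mapping fragments in this shape (a template body plus _meta) can be registered directly as component templates. A minimal sketch, assuming the JSON above is saved locally and that the template name is arbitrary:

curl -K /opt/so/conf/elasticsearch/curl.config -s -k -XPUT \
  "https://localhost:9200/_component_template/so-device-mappings" \
  -H 'Content-Type: application/json' \
  -d @so-device-mappings.json
# Index templates can then include it via "composed_of": ["so-device-mappings"].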

View File

@@ -0,0 +1,32 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"kismet": {
"properties": {
"alerts": {
"properties": {
"count": {
"type": "long"
}
}
},
"first_seen": {
"type": "date"
},
"last_seen": {
"type": "date"
},
"seenby": {
"type": "nested"
}
}
}
}
}
}
}

View File

@@ -77,6 +77,43 @@
"type": "keyword"
}
}
},
"wireless": {
"properties": {
"associated_clients": {
"ignore_above": 1024,
"type": "keyword"
},
"bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"channel": {
"ignore_above": 1024,
"type": "keyword"
},
"channel_utilization": {
"type": "float"
},
"frequency": {
"type": "double"
},
"ssid": {
"ignore_above": 1024,
"type": "keyword"
},
"ssid_cloaked": {
"type": "integer"
},
"known_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"last_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}

View File

@@ -20,21 +20,36 @@
"so_detection": {
"properties": {
"publicId": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"title": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"severity": {
"ignore_above": 1024,
"type": "keyword"
},
"author": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"description": {
"type": "text"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"product": {
"ignore_above": 1024,
"type": "keyword"
},
"service": {
"ignore_above": 1024,
"type": "keyword"
},
"content": {
"type": "text"
},
@@ -48,7 +63,8 @@
"type": "boolean"
},
"tags": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"ruleset": {
"ignore_above": 1024,
@@ -135,4 +151,4 @@
"_meta": {
"ecs_version": "1.12.2"
}
}
}

View File

@@ -5,6 +5,6 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
curl -K /opt/so/conf/elasticsearch/curl.config -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://localhost:9200/_cat/indices?pretty&v&s=index"

View File

@@ -40,7 +40,7 @@ fi
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
{% else %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}
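
The added trailing "$" anchors the exclusion to the exact node name at the end of each allocation line. A quick illustration of why that matters, using a hypothetical node list:

printf 'manager\nmanager2\nsearchnode1\n' | grep -v manager      # also drops manager2
printf 'manager\nmanager2\nsearchnode1\n' | grep -v "manager$"   # drops only the exact match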

View File

@@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do

View File

@@ -27,6 +27,7 @@ overlimit() {
# 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.
while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
# If we can't query Elasticsearch, then immediately return false.
@@ -34,28 +35,36 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
# We iterate through the closed and open indices
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Check if index is an older index. If it is an older index, delete it before moving on to newer indices.
if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1
fi
fi
if ! overlimit ; then
exit
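
The write-index guard above can be reproduced by hand when troubleshooting. A sketch against a hypothetical data stream name, using the same helpers the script itself relies on:

DATASTREAM="logs-system.syslog-default"  # hypothetical data stream name
# The last backing index listed is the current write index and must never be deleted directly.
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices[].index_name'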

View File

@@ -133,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
for i in $pattern; do
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery)
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"

View File

@@ -9,11 +9,9 @@
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
@@ -32,11 +30,9 @@
'so-kibana',
'so-kratos',
'so-logstash',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',

View File

@@ -98,19 +98,11 @@ firewall:
tcp:
- 7788
udp: []
mysql:
tcp:
- 3306
udp: []
nginx:
tcp:
- 80
- 443
udp: []
playbook:
tcp:
- 3000
udp: []
redis:
tcp:
- 6379
@@ -178,8 +170,6 @@ firewall:
hostgroups:
eval:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -363,8 +353,6 @@ firewall:
hostgroups:
manager:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -559,8 +547,6 @@ firewall:
hostgroups:
managersearch:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -756,8 +742,6 @@ firewall:
- all
standalone:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb

View File

@@ -7,6 +7,7 @@ firewall:
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
anywhere: &hostgroupsettingsadv
description: List of IP or CIDR blocks to allow access to this hostgroup.
forcedType: "[]string"
@@ -15,6 +16,7 @@ firewall:
advanced: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
beats_endpoint: *hostgroupsettings
beats_endpoint_ssl: *hostgroupsettings
dockernet: &ROhostgroupsettingsadv
@@ -53,6 +55,7 @@ firewall:
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
customhostgroup1: *customhostgroupsettings
customhostgroup2: *customhostgroupsettings
customhostgroup3: *customhostgroupsettings
@@ -70,12 +73,14 @@ firewall:
helpLink: firewall.html
advanced: True
multiline: True
duplicates: True
udp: &udpsettings
description: List of UDP ports for this port group.
forcedType: "[]string"
helpLink: firewall.html
advanced: True
multiline: True
duplicates: True
agrules:
tcp: *tcpsettings
udp: *udpsettings
@@ -121,15 +126,9 @@ firewall:
localrules:
tcp: *tcpsettings
udp: *udpsettings
mysql:
tcp: *tcpsettings
udp: *udpsettings
nginx:
tcp: *tcpsettings
udp: *udpsettings
playbook:
tcp: *tcpsettings
udp: *udpsettings
redis:
tcp: *tcpsettings
udp: *udpsettings
@@ -193,6 +192,7 @@ firewall:
multiline: True
forcedType: "[]string"
helpLink: firewall.html
duplicates: True
sensor:
portgroups: *portgroupsdocker
searchnode:
@@ -246,6 +246,7 @@ firewall:
multiline: True
forcedType: "[]string"
helpLink: firewall.html
duplicates: True
dockernet:
portgroups: *portgroupshost
localhost:

View File

@@ -28,7 +28,7 @@ global:
description: Used for handling of authentication cookies.
global: True
airgap:
description: Sets airgap mode.
description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
global: True
readonly: True
imagerepo:

View File

@@ -11,6 +11,8 @@ idh_sshd_selinux:
- sel_type: ssh_port_t
- prereq:
- file: openssh_config
- require:
- pkg: python_selinux_mgmt_tools
{% endif %}
openssh_config:

View File

@@ -15,3 +15,9 @@ openssh:
- enable: False
- name: {{ openssh_map.service }}
{% endif %}
{% if grains.os_family == 'RedHat' %}
python_selinux_mgmt_tools:
pkg.installed:
- name: policycoreutils-python-utils
{% endif %}

View File

@@ -9,43 +9,53 @@ idstools:
forcedType: string
helpLink: rules.html
ruleset:
description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
global: True
regex: ETPRO\b|ETOPEN\b
helpLink: rules.html
urls:
description: This is a list of additional rule download locations.
description: This is a list of additional rule download locations. This feature is currently disabled.
global: True
multiline: True
forcedType: "[]string"
readonly: True
helpLink: rules.html
sids:
disabled:
description: Contains the list of NIDS rules manually disabled across the grid. To disable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions.
description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; use the Detections screen to disable rules.
global: True
multiline: True
forcedType: "[]string"
regex: \d*|re:.*
helpLink: managing-alerts.html
readonlyUi: True
advanced: true
enabled:
description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions.
description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; use the Detections screen to enable rules.
global: True
multiline: True
forcedType: "[]string"
regex: \d*|re:.*
helpLink: managing-alerts.html
readonlyUi: True
advanced: true
modify:
description: Contains the list of NIDS rules that were modified from their default values. Entries must adhere to the following format - SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"
description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; use the Detections screen to modify rules.
global: True
multiline: True
forcedType: "[]string"
helpLink: managing-alerts.html
readonlyUi: True
advanced: true
rules:
local__rules:
description: Contains the list of custom NIDS rules applied to the grid. To add custom NIDS rules to the grid, enter one rule per line in the Current Grid Value box.
description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; use the Detections screen to adjust rules.
file: True
global: True
advanced: True
title: Local Rules
helpLink: local-rules.html
readonlyUi: True
filters__rules:
description: If you are using Suricata for metadata, then you can set custom filters for that metadata here.
file: True

View File

@@ -1,16 +1,15 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
append_so-mysql_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-mysql
- unless: grep -q so-mysql /opt/so/conf/so-status/so-status.conf
so-logstash_image:
docker_image.present:
- name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
{% else %}

View File

@@ -10,6 +10,7 @@ logstash:
helpLink: logstash.html
multiline: True
forcedType: "[]string"
duplicates: True
receiver: *assigned_pipelines
heavynode: *assigned_pipelines
searchnode: *assigned_pipelines
@@ -23,6 +24,7 @@ logstash:
helpLink: logstash.html
multiline: True
forcedType: "[]string"
duplicates: True
fleet: *defined_pipelines
manager: *defined_pipelines
search: *defined_pipelines
@@ -38,6 +40,7 @@ logstash:
multiline: True
forcedType: string
helpLink: logstash.html
duplicates: True
custom002: *pipeline_config
custom003: *pipeline_config
custom004: *pipeline_config

View File

@@ -27,6 +27,15 @@ repo_log_dir:
- user
- group
agents_log_dir:
file.directory:
- name: /opt/so/log/agents
- user: root
- group: root
- recurse:
- user
- group
yara_log_dir:
file.directory:
- name: /opt/so/log/yarasync
@@ -64,17 +73,6 @@ manager_sbin:
- exclude_pat:
- "*_test.py"
yara_update_scripts:
file.recurse:
- name: /usr/sbin/
- source: salt://manager/tools/sbin_jinja/
- user: socore
- group: socore
- file_mode: 755
- template: jinja
- defaults:
EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }}
so-repo-file:
file.managed:
- name: /opt/so/conf/reposync/repodownload.conf
@@ -101,6 +99,17 @@ so-repo-sync:
- hour: '{{ MANAGERMERGED.reposync.hour }}'
- minute: '{{ MANAGERMERGED.reposync.minute }}'
so_fleetagent_status:
cron.present:
- name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1
- identifier: so_fleetagent_status
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack
@@ -117,51 +126,6 @@ rules_dir:
- group: socore
- makedirs: True
{% if STRELKAMERGED.rules.enabled %}
strelkarepos:
file.managed:
- name: /opt/so/conf/strelka/repos.txt
- source: salt://strelka/rules/repos.txt.jinja
- template: jinja
- defaults:
STRELKAREPOS: {{ STRELKAMERGED.rules.repos }}
- makedirs: True
strelka-yara-update:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1'
- identifier: strelka-yara-update
- hour: '7'
- minute: '1'
strelka-yara-download:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1'
- identifier: strelka-yara-download
- hour: '7'
- minute: '1'
{% if not GLOBALS.airgap %}
update_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-update
- onchanges:
- file: yara_update_scripts
download_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-download
- onchanges:
- file: yara_update_scripts
{% endif %}
{% endif %}
{% else %}
{{sls}}_state_not_allowed:

View File

@@ -20,10 +20,6 @@ manager:
description: String of hosts to ignore the proxy settings for.
global: True
helpLink: proxy.html
playbook:
description: Enable playbook 1=enabled 0=disabled.
global: True
helpLink: playbook.html
proxy:
description: Proxy server to use for updates.
global: True

View File

@@ -5,8 +5,6 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start mysql $1
curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq .

View File

@@ -201,11 +201,7 @@ function add_idh_to_minion() {
"idh:"\
" enabled: True"\
" restrict_management_ip: $IDH_MGTRESTRICT"\
" services:" >> "$PILLARFILE"
IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
for service in ${IDH_SERVICES_ARRAY[@]}; do
echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
done
" " >> $PILLARFILE
}
function add_logstash_to_minion() {
@@ -286,12 +282,6 @@ function add_sensor_to_minion() {
echo " " >> $PILLARFILE
}
function add_playbook_to_minion() {
printf '%s\n'\
"playbook:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_elastalert_to_minion() {
printf '%s\n'\
@@ -353,13 +343,6 @@ function add_nginx_to_minion() {
" " >> $PILLARFILE
}
function add_soctopus_to_minion() {
printf '%s\n'\
"soctopus:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_soc_to_minion() {
printf '%s\n'\
"soc:"\
@@ -374,13 +357,6 @@ function add_registry_to_minion() {
" " >> $PILLARFILE
}
function add_mysql_to_minion() {
printf '%s\n'\
"mysql:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_kratos_to_minion() {
printf '%s\n'\
"kratos:"\
@@ -446,26 +422,19 @@ function checkMine() {
}
function updateMine() {
retry 20 1 "salt '$MINION_ID' mine.update" True
}
function createEVAL() {
is_pcaplimit=true
pcapspace
add_elasticsearch_to_minion
add_sensor_to_minion
add_strelka_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -478,17 +447,14 @@ function createSTANDALONE() {
add_logstash_to_minion
add_sensor_to_minion
add_strelka_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -497,17 +463,14 @@ function createSTANDALONE() {
function createMANAGER() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -516,17 +479,14 @@ function createMANAGER() {
function createMANAGERSEARCH() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -636,20 +596,16 @@ function addMinion() {
}
function updateMineAndApplyStates() {
# tell the minion to populate the mine with data from mine_functions which is populated during setup
# this only needs to happen on non managers since they handle this during setup
# and they need to wait for ca creation to update the mine
updateMine
checkMine "network.ip_addrs"
# apply the elasticsearch state to the manager if a new searchnode was added
#checkMine "network.ip_addrs"
# calls so-common and set_minionid sets MINIONID to local minion id
set_minionid
# if this is a searchnode or heavynode, start downloading logstash and elasticsearch containers while the manager prepares for the new node
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
# calls so-common and set_minionid sets MINIONID to local minion id
set_minionid
salt $MINIONID state.apply elasticsearch queue=True --async
salt $MINIONID state.apply soc queue=True --async
salt-run state.orch orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 &
fi
# run this async so the cli doesn't wait for a return
salt "$MINION_ID" state.highstate --async queue=True
# $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured
salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 &
}
function setupMinionFiles() {

View File

@@ -17,13 +17,16 @@ def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' LISTITEM - Item to add to the list.')
print(' VALUE - Value to set for a given key')
print(' LISTITEM - Item to append to a given key\'s list value')
sys.exit(1)
@@ -37,6 +40,7 @@ def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def appendItem(content, key, listItem):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -51,6 +55,30 @@ def appendItem(content, key, listItem):
print("The key provided does not exist. No action was taken on the file.")
return 1
def convertType(value):
if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if "." in value:
try:
value = float(value)
return value
except ValueError:
pass
try:
value = int(value)
return value
except ValueError:
pass
lowered_value = value.lower()
if lowered_value == "false":
return False
elif lowered_value == "true":
return True
return value
def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
@@ -62,11 +90,41 @@ def append(args):
listItem = args[2]
content = loadYaml(filename)
appendItem(content, key, listItem)
appendItem(content, key, convertType(listItem))
writeYaml(filename, content)
return 0
def addKey(content, key, value):
pieces = key.split(".", 1)
if len(pieces) > 1:
if not pieces[0] in content:
content[pieces[0]] = {}
addKey(content[pieces[0]], pieces[1], value)
elif key in content:
raise KeyError("key already exists")
else:
content[key] = value
def add(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -91,6 +149,24 @@ def remove(args):
return 0
def replace(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
removeKey(content, key)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def main():
args = sys.argv[1:]
@@ -100,8 +176,10 @@ def main():
commands = {
"help": showUsage,
"add": add,
"append": append,
"remove": remove,
"replace": replace,
}
code = 1
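
Taken together, the four commands cover the pillar edits made elsewhere in this diff (soup calls add, replace, and remove directly). A few hypothetical invocations against a scratch file; values pass through convertType, so booleans and bare numbers are stored typed:

so-yaml.py add     /tmp/example.yaml soc.telemetryEnabled true   # fails if the key already exists
so-yaml.py replace /tmp/example.yaml soc.telemetryEnabled false  # remove-then-add, succeeds either way
so-yaml.py append  /tmp/example.yaml key3 newitem                # key must already exist and hold a list
so-yaml.py remove  /tmp/example.yaml soc.telemetryEnabled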

View File

@@ -42,6 +42,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Usage:")
def test_remove_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.remove(["file"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -106,6 +114,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_append_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.append(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -201,3 +217,146 @@ class TestRemove(unittest.TestCase):
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_add_key(self):
content = {}
soyaml.addKey(content, "foo", 123)
self.assertEqual(content, {"foo": 123})
try:
soyaml.addKey(content, "foo", "bar")
self.assertFail("expected key error since key already exists")
except KeyError:
pass
try:
soyaml.addKey(content, "foo.bar", 123)
self.assertFail("expected type error since key parent value is not a map")
except TypeError:
pass
content = {}
soyaml.addKey(content, "foo", "bar")
self.assertEqual(content, {"foo": "bar"})
soyaml.addKey(content, "badda.badda", "boom")
self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}})
def test_add_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.add(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_add(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.add([filename, "key4", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n"
self.assertEqual(actual, expected)
def test_add_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_add_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.replace(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_replace(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.replace([filename, "key2", True])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n"
self.assertEqual(actual, expected)
def test_replace_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2.deep1", 46])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_convert(self):
self.assertEqual(soyaml.convertType("foo"), "foo")
self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar")
self.assertEqual(soyaml.convertType("123"), 123)
self.assertEqual(soyaml.convertType("0"), 0)
self.assertEqual(soyaml.convertType("00"), "00")
self.assertEqual(soyaml.convertType("0123"), "0123")
self.assertEqual(soyaml.convertType("123.456"), 123.456)
self.assertEqual(soyaml.convertType("0123.456"), "0123.456")
self.assertEqual(soyaml.convertType("true"), True)
self.assertEqual(soyaml.convertType("TRUE"), True)
self.assertEqual(soyaml.convertType("false"), False)
self.assertEqual(soyaml.convertType("FALSE"), False)
self.assertEqual(soyaml.convertType(""), "")

View File

@@ -229,7 +229,7 @@ check_local_mods() {
# {% endraw %}
check_pillar_items() {
local pillar_output=$(salt-call pillar.items --out=json)
local pillar_output=$(salt-call pillar.items -lerror --out=json)
cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
@@ -357,6 +357,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
true
}
@@ -373,6 +374,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
true
}
@@ -435,6 +437,17 @@ post_to_2.4.60() {
POSTVERSION=2.4.60
}
post_to_2.4.70() {
printf "\nRemoving idh.services from any existing IDH node pillar files\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
echo "Removing idh.services from: $file"
so-yaml.py remove "$file" idh.services
fi
done
POSTVERSION=2.4.70
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -574,6 +587,161 @@ up_to_2.4.60() {
INSTALLEDVERSION=2.4.60
}
up_to_2.4.70() {
playbook_migration
suricata_idstools_migration
toggle_telemetry
add_detection_test_pillars
INSTALLEDVERSION=2.4.70
}
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
echo "Adding detection pillar values for automated testing"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.allowRegex SecurityOnion
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1
fi
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at anytime via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
suricata_idstools_migration() {
#Backup the pillars for idstools
mkdir -p /nsm/backup/detections-migration/idstools
rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools
if [[ $? -eq 0 ]]; then
echo "IDStools configuration has been backed up."
else
fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up."
fi
#Backup Thresholds
mkdir -p /nsm/backup/detections-migration/suricata
rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata
if [[ $? -eq 0 ]]; then
echo "Suricata thresholds have been backed up."
else
fail "Error: rsync failed to copy the files. Thresholds have not been backed up."
fi
#Backup local rules
mkdir -p /nsm/backup/detections-migration/suricata/local-rules
rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
fi
#Tell SOC to migrate
mkdir -p /opt/so/conf/soc/migrations
echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70
chown -R socore:socore /opt/so/conf/soc/migrations
}
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
# Remove cronjobs
crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -
if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then
# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)
if [[ "$active_rules_count" -gt 0 ]]; then
# Prompt the user to press ENTER if active Elastalert rules found
echo
echo "$active_rules_count Active Elastalert/Playbook rules found."
echo "In preparation for the new Detections module, they will be backed up and then disabled."
echo
echo "Press ENTER to proceed."
echo
# Read user input
read -r
echo "Backing up the Elastalert rules..."
rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/
# Verify that rsync completed successfully
if [[ $? -eq 0 ]]; then
# Delete the Elastalert rules
rm -f /opt/so/rules/elastalert/playbook/*.yaml
echo "Active Elastalert rules have been backed up."
else
fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
fi
fi
echo
echo "Exporting Sigma rules from Playbook..."
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
done || fail "Failed to export Sigma rules..."
echo
echo "Exporting Sigma Filters from Playbook..."
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."
echo
echo "Backing up Playbook database..."
docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
fi
echo
echo "Stopping Playbook services & cleaning up..."
for container in so-playbook so-mysql so-soctopus; do
if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
docker stop $container
fi
done
sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*
echo
echo "Playbook Migration is complete...."
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -617,12 +785,6 @@ unmount_update() {
update_airgap_rules() {
# Copy the rules over to update them for airgap.
rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
rsync -av $UPDATE_DIR/agrules/yara/* /nsm/rules/yara/
if [ -d /nsm/repo/rules/sigma ]; then
rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/
fi
# SOC Detections Airgap
rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
}
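For operators who want to see what an airgap rule sync would do before it runs, rsync's dry-run mode lists the files without copying them; a sketch assuming the same $UPDATE_DIR layout as above:

# Hypothetical preview of the airgap rule sync; -n (dry run) prints actions without copying.
rsync -avn $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/
rsync -avn $UPDATE_DIR/agrules/detect-sigma/ /nsm/rules/detect-sigma/
rsync -avn $UPDATE_DIR/agrules/detect-yara/ /nsm/rules/detect-yara/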
@@ -756,7 +918,7 @@ verify_latest_update_script() {
else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null
# Verify that soup scripts updated as expected
get_soup_script_hashes
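The body of get_soup_script_hashes is not shown in this hunk, but the verification it implies can be sketched as a hash comparison between the installed scripts and the copies shipped in the update payload (the script list and payload path here are assumptions, not confirmed by this diff):

# Hypothetical hash comparison; the real function may differ.
for script in soup so-common; do
  installed=$(sha256sum "/usr/sbin/$script" | awk '{print $1}')
  shipped=$(sha256sum "$UPDATE_DIR/salt/common/tools/sbin/$script" | awk '{print $1}')
  [ "$installed" = "$shipped" ] || echo "$script does not match the shipped copy."
done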
@@ -837,7 +999,6 @@ main() {
echo "### Preparing soup at $(date) ###"
echo ""
set_os
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
@@ -903,6 +1064,7 @@ main() {
backup_old_states_pillars
fi
copy_new_files
create_local_directories "/opt/so/saltstack/default"
apply_hotfix
echo "Hotfix applied"
update_version
@@ -969,6 +1131,7 @@ main() {
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
create_local_directories "/opt/so/saltstack/default"
update_version
echo ""

View File

@@ -1,51 +0,0 @@
#!/bin/bash
NOROOT=1
. /usr/sbin/so-common
{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}
# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy="{{ noproxy }}"
{%- endif %}
repos="/opt/so/conf/strelka/repos.txt"
output_dir=/nsm/rules/yara
gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com)
clone_dir="/tmp"
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
while IFS= read -r repo; do
if ! echo "$repo" | grep -qE '^#'; then
# Remove any old copy of the repo left behind by a previous error condition or unexpected disruption
repo_name=$(echo "$repo" | awk -F '/' '{print $NF}')
[ -d "$output_dir/$repo_name" ] && rm -rf "$output_dir/$repo_name"
# Clone repo and make appropriate directories for rules
git clone $repo $clone_dir/$repo_name
echo "Analyzing rules from $clone_dir/$repo_name..."
mkdir -p $output_dir/$repo_name
# Ensure a copy of the license is available for the rules
[ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
# Copy over rules
for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
cp "$i" "$output_dir/$repo_name"
done
rm -rf $clone_dir/$repo_name
fi
done < $repos
echo "Done!"
/usr/sbin/so-yara-update
else
echo "Server returned $gh_status status code."
echo "No connectivity to Github...exiting..."
exit 1
fi
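Because the proxy variables above are rendered from pillar at deploy time, the same download logic can be exercised manually when testing proxy egress; a usage sketch with a hypothetical proxy address:

# Hypothetical manual run behind an HTTP proxy (address is an example only).
http_proxy=http://proxy.example.com:3128 \
https_proxy=http://proxy.example.com:3128 \
/usr/sbin/so-yara-download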

View File

@@ -1,41 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
NOROOT=1
. /usr/sbin/so-common
echo "Starting to check for yara rule updates at $(date)..."
newcounter=0
excludedcounter=0
excluded_rules=({{ EXCLUDEDRULES | join(' ') }})
# Pull down the SO Rules
SORULEDIR=/nsm/rules/yara
OUTPUTDIR=/opt/so/saltstack/local/salt/strelka/rules
mkdir -p $OUTPUTDIR
# Remove all existing rules prior to copying so that old rules are cleared out
rm -f $OUTPUTDIR/*
for i in $(find $SORULEDIR -name "*.yar" -o -name "*.yara"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
if [[ ! "${excluded_rules[*]}" =~ ${rule_name} ]]; then
echo "Adding rule: $rule_name..."
cp $i $OUTPUTDIR/$rule_name
((newcounter++))
else
echo "Excluding rule: $rule_name..."
((excludedcounter++))
fi
done
if [ "$newcounter" -gt 0 ] || [ "$excludedcounter" -gt 0 ];then
echo "$newcounter rules added."
echo "$excludedcounter rule(s) excluded."
fi
echo "Finished rule updates at $(date)..."

View File

@@ -1,89 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
# MySQL Setup
mysqlpkgs:
pkg.removed:
- skip_suggestions: False
- pkgs:
{% if grains['os_family'] != 'RedHat' %}
- python3-mysqldb
{% else %}
- python3-mysqlclient
{% endif %}
mysqletcdir:
file.directory:
- name: /opt/so/conf/mysql/etc
- user: 939
- group: 939
- makedirs: True
mysqlpiddir:
file.directory:
- name: /opt/so/conf/mysql/pid
- user: 939
- group: 939
- makedirs: True
mysqlcnf:
file.managed:
- name: /opt/so/conf/mysql/etc/my.cnf
- source: salt://mysql/etc/my.cnf
- user: 939
- group: 939
mysqlpass:
file.managed:
- name: /opt/so/conf/mysql/etc/mypass
- source: salt://mysql/etc/mypass
- user: 939
- group: 939
- template: jinja
- defaults:
MYSQLPASS: {{ MYSQLPASS }}
mysqllogdir:
file.directory:
- name: /opt/so/log/mysql
- user: 939
- group: 939
- makedirs: True
mysqldatadir:
file.directory:
- name: /nsm/mysql
- user: 939
- group: 939
- makedirs: True
mysql_sbin:
file.recurse:
- name: /usr/sbin
- source: salt://mysql/tools/sbin
- user: 939
- group: 939
- file_mode: 755
#mysql_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://mysql/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
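Before removal, a state like this could be dry-run in isolation; mysql.config matches the include used by mysql.enabled below. A usage sketch:

# Hypothetical dry run; test=True reports what would change without applying it.
salt-call state.apply mysql.config test=True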

View File

@@ -1,2 +0,0 @@
mysql:
enabled: False

View File

@@ -1,84 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
include:
- mysql.config
- mysql.sostatus
{% if MYSQLPASS == None %}
mysql_password_none:
test.configurable_test_state:
- changes: False
- result: False
- comment: "MySQL Password Error - Not Starting MySQL"
{% else %}
so-mysql:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
- hostname: so-mysql
- user: socore
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-mysql'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-mysql'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- environment:
- MYSQL_ROOT_HOST={{ GLOBALS.so_docker_gateway }}
- MYSQL_ROOT_PASSWORD=/etc/mypass
{% if DOCKER.containers['so-mysql'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
- /opt/so/conf/mysql/etc/mypass:/etc/mypass
- /nsm/mysql:/var/lib/mysql:rw
- /opt/so/log/mysql:/var/log/mysql:rw
{% if DOCKER.containers['so-mysql'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- cap_add:
- SYS_NICE
- watch:
- file: mysqlcnf
- file: mysqlpass
- require:
- file: mysqlcnf
- file: mysqlpass
{% endif %}
delete_so-mysql_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-mysql$
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
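When this state was still in use, whether the container actually came up could be confirmed from the manager; a sketch of two quick checks:

# Hypothetical checks: container status, and so-status tracking for so-mysql.
docker ps --filter name=so-mysql --format '{{.Names}} {{.Status}}'
grep '^so-mysql$' /opt/so/conf/so-status/so-status.conf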

View File

@@ -1,32 +0,0 @@
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html
[mysqld]
#
# Remove leading # and set to the amount of RAM for the most important data
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
# innodb_buffer_pool_size = 128M
#
# Remove leading # to turn on a very important data integrity option: logging
# changes to the binary log between backups.
# log_bin
#
# Remove leading # to set options mainly useful for reporting servers.
# The server defaults are faster for transactions and fast SELECTs.
# Adjust sizes as needed, experiment to find the optimal values.
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M
host_cache_size=0
skip-name-resolve
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
secure-file-priv=/var/lib/mysql-files
user=socore
log-error=/var/log/mysql/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
# Switch back to the native password module so that playbook can connect
authentication_policy=mysql_native_password
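With the native password plugin forced on, the effective setting can be confirmed against the running container, reading the root password the same way the soup migration above does; a sketch:

# Hypothetical check of the effective authentication policy inside so-mysql.
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -e 'SELECT @@authentication_policy;'"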

View File

@@ -1 +0,0 @@
{{ MYSQLPASS }}

View File

@@ -1,14 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'mysql/map.jinja' import MYSQLMERGED %}
include:
{% if MYSQLMERGED.enabled %}
- mysql.enabled
{% else %}
- mysql.disabled
{% endif %}

View File

@@ -1,7 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'mysql/defaults.yaml' as MYSQLDEFAULTS with context %}
{% set MYSQLMERGED = salt['pillar.get']('mysql', MYSQLDEFAULTS.mysql, merge=True) %}

View File

@@ -1,4 +0,0 @@
mysql:
enabled:
description: You can enable or disable MySQL.
advanced: True

View File

@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart mysql $1

View File

@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop mysql $1

View File

@@ -277,38 +277,11 @@ http {
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ GLOBALS.manager }}:7000/;
proxy_read_timeout 300;
proxy_connect_timeout 300;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
if ($http_authorization = "") {
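With the /playbook/ and /soctopus/ location blocks removed, requests to those paths should no longer be proxied to the retired services; a spot check from any host that can reach the manager (hostname is an example):

# Hypothetical spot check; expect nginx's own 404 rather than a proxied application.
curl -sk -o /dev/null -w '/playbook/ -> %{http_code}\n' https://manager.example.com/playbook/
curl -sk -o /dev/null -w '/soctopus/ -> %{http_code}\n' https://manager.example.com/soctopus/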

View File

@@ -0,0 +1,17 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set NEWNODE = salt['pillar.get']('setup:newnode') %}
{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %}
{{NEWNODE}}_download_logstash_elasticsearch:
salt.state:
- tgt: {{ NEWNODE }}
- sls:
- repo.client
- docker
- logstash.download
- elasticsearch.download
{% endif %}
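States like this are typically driven as an orchestration from the manager; a usage sketch, with the orchestration path and minion ID hypothetical since neither is shown in this diff:

# Hypothetical invocation; setup:newnode is normally populated by the setup tooling.
salt-run state.orch setup.download_new_node pillar='{"setup": {"newnode": "sn01_searchnode"}}'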

View File

@@ -0,0 +1,32 @@
{% set MANAGER = salt['pillar.get']('setup:manager') %}
{% set NEWNODE = salt['pillar.get']('setup:newnode') %}
# Tell the minion to populate the mine with data from mine_functions, which is configured during setup.
# This only needs to happen on non-manager nodes, since managers handle it during setup
# and other nodes need to wait for CA creation before updating the mine.
{{NEWNODE}}_update_mine:
salt.function:
- name: mine.update
- tgt: {{ NEWNODE }}
- retry:
attempts: 36
interval: 5
# we need to prepare the manager for a new searchnode or heavynode
{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %}
manager_run_es_soc:
salt.state:
- tgt: {{ MANAGER }}
- sls:
- elasticsearch
- soc
- queue: True
- require:
- salt: {{NEWNODE}}_update_mine
{% endif %}
{{NEWNODE}}_run_highstate:
salt.state:
- tgt: {{ NEWNODE }}
- highstate: True
- queue: True
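The retry block above gives the new minion up to 36 attempts at 5-second intervals (about three minutes) to populate the mine. Whether that succeeded can be checked afterwards; a sketch with a hypothetical minion ID:

# Hypothetical verification that the new node's mine data is visible from the manager.
salt 'sn01_searchnode' mine.update
salt 'manager*' mine.get 'sn01_searchnode' network.ip_addrs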

View File

@@ -1,19 +0,0 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
# This state will create the SecOps Automation user within Playbook
include:
- playbook
wait_for_playbook:
cmd.run:
- name: until nc -z {{ GLOBALS.manager }} 3000; do sleep 1; done
- timeout: 300
create_user:
cmd.script:
- source: salt://playbook/files/automation_user_create.sh
- cwd: /root
- template: jinja
- onchanges:
- cmd: wait_for_playbook

View File

@@ -1,120 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %}
include:
- mysql
create_playbookdbuser:
mysql_user.present:
- name: playbookdbuser
- password: {{ PLAYBOOKPASS }}
- host: "{{ DOCKER.range.split('/')[0] }}/255.255.255.0"
- connection_host: {{ GLOBALS.manager }}
- connection_port: 3306
- connection_user: root
- connection_pass: {{ MYSQLPASS }}
query_playbookdbuser_grants:
mysql_query.run:
- database: playbook
- query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.range.split('/')[0] }}/255.255.255.0';"
- connection_host: {{ GLOBALS.manager }}
- connection_port: 3306
- connection_user: root
- connection_pass: {{ MYSQLPASS }}
query_updatwebhooks:
mysql_query.run:
- database: playbook
- query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip}}:7000/playbook/webhook' where project_id = 1"
- connection_host: {{ GLOBALS.manager }}
- connection_port: 3306
- connection_user: root
- connection_pass: {{ MYSQLPASS }}
query_updatename:
mysql_query.run:
- database: playbook
- query: "update custom_fields set name = 'Custom Filter' where id = 21;"
- connection_host: {{ GLOBALS.manager }}
- connection_port: 3306
- connection_user: root
- connection_pass: {{ MYSQLPASS }}
query_updatepluginurls:
mysql_query.run:
- database: playbook
- query: |-
update settings set value =
"--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
project: '1'
convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac
create_url: http://{{ GLOBALS.manager }}:7000/playbook/play"
where id = 43
- connection_host: {{ GLOBALS.manager }}
- connection_port: 3306
- connection_user: root
- connection_pass: {{ MYSQLPASS }}
playbook_sbin:
file.recurse:
- name: /usr/sbin
- source: salt://playbook/tools/sbin
- user: 939
- group: 939
- file_mode: 755
#playbook_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://playbook/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
playbooklogdir:
file.directory:
- name: /opt/so/log/playbook
- dir_mode: 775
- user: 939
- group: 939
- makedirs: True
playbookfilesdir:
file.directory:
- name: /opt/so/conf/playbook/redmine-files
- dir_mode: 775
- user: 939
- group: 939
- makedirs: True
{% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' {'print $1'}").split() %}
idh-plays:
file.recurse:
- name: /opt/so/conf/soctopus/sigma-import
- source: salt://idh/plays
- makedirs: True
cmd.run:
- name: so-playbook-import True
- onchanges:
- file: /opt/so/conf/soctopus/sigma-import
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
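After the migration drops this state, any leftover playbookdbuser grants can be confirmed directly against the database before so-mysql is retired; a sketch reusing the password lookup seen in soup above:

# Hypothetical check for leftover playbookdbuser accounts in so-mysql.
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -sN -e \"SELECT user, host FROM mysql.user WHERE user='playbookdbuser';\""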

Some files were not shown because too many files have changed in this diff.