Merge pull request #11777 from Security-Onion-Solutions/2.4/dev

2.4.30
This commit is contained in:
Mike Reeves
2023-11-13 15:27:24 -05:00
committed by GitHub
95 changed files with 12641 additions and 5637 deletions

View File

@@ -4,9 +4,11 @@ on:
push: push:
paths: paths:
- "salt/sensoroni/files/analyzers/**" - "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin"
pull_request: pull_request:
paths: paths:
- "salt/sensoroni/files/analyzers/**" - "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin"
jobs: jobs:
build: build:
@@ -16,7 +18,7 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: ["3.10"] python-version: ["3.10"]
python-code-path: ["salt/sensoroni/files/analyzers"] python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
@@ -34,4 +36,4 @@ jobs:
flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
- name: Test with pytest - name: Test with pytest
run: | run: |
pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=${{ matrix.python-code-path }}/pytest.ini pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini

View File

@@ -1,18 +1,18 @@
### 2.4.20-20231012 ISO image released on 2023/10/12 ### 2.4.30-20231113 ISO image released on 2023/11/13
### Download and Verify ### Download and Verify
2.4.20-20231012 ISO image: 2.4.30-20231113 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231113.iso
MD5: 7D6ACA843068BA9432B3FF63BFD1EF0F MD5: 15EB5A74782E4C2D5663D29E275839F6
SHA1: BEF2B906066A1B04921DF0B80E7FDD4BC8ECED5C SHA1: BBD4A7D77ADDA94B866F1EFED846A83DDFD34D73
SHA256: 5D511D50F11666C69AE12435A47B9A2D30CB3CC88F8D38DC58A5BC0ECADF1BF5 SHA256: 4509EB8E11DB49C6CD3905C74C5525BDB1F773488002179A846E00DE8E499988
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231113.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231113.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231113.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.20-20231012.iso.sig securityonion-2.4.20-20231012.iso gpg --verify securityonion-2.4.30-20231113.iso.sig securityonion-2.4.30-20231113.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Thu 12 Oct 2023 01:28:32 PM EDT using RSA key ID FE507013 gpg: Signature made Mon 13 Nov 2023 09:23:21 AM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.

2
HOTFIX
View File

@@ -1 +1 @@
20231012

View File

@@ -1 +1 @@
2.4.20 2.4.30

View File

@@ -12,7 +12,6 @@ role:
eval: eval:
fleet: fleet:
heavynode: heavynode:
helixsensor:
idh: idh:
import: import:
manager: manager:

View File

@@ -7,19 +7,23 @@
tgt_type='compound') | dictsort() tgt_type='compound') | dictsort()
%} %}
{% set hostname = cached_grains[minionid]['host'] %} # only add a node to the pillar if it returned an ip from the mine
{% set node_type = minionid.split('_')[1] %} {% if ip | length > 0%}
{% if node_type not in node_types.keys() %} {% set hostname = cached_grains[minionid]['host'] %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %} {% set node_type = minionid.split('_')[1] %}
{% else %} {% if node_type not in node_types.keys() %}
{% if hostname not in node_types[node_type] %} {% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %} {% else %}
{% do node_types[node_type][hostname].update(ip[0]) %} {% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %} {% endif %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
logstash: logstash:
nodes: nodes:
{% for node_type, values in node_types.items() %} {% for node_type, values in node_types.items() %}

View File

@@ -4,18 +4,22 @@
{% set hostname = minionid.split('_')[0] %} {% set hostname = minionid.split('_')[0] %}
{% set node_type = minionid.split('_')[1] %} {% set node_type = minionid.split('_')[1] %}
{% set is_alive = False %} {% set is_alive = False %}
{% if minionid in manage_alived.keys() %}
{% if ip[0] == manage_alived[minionid] %} # only add a node to the pillar if it returned an ip from the mine
{% set is_alive = True %} {% if ip | length > 0%}
{% if minionid in manage_alived.keys() %}
{% if ip[0] == manage_alived[minionid] %}
{% set is_alive = True %}
{% endif %}
{% endif %} {% endif %}
{% endif %} {% if node_type not in node_types.keys() %}
{% if node_type not in node_types.keys() %} {% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
{% else %} {% else %}
{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %} {% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
{% else %}
{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
{% endif %}
{% endif %} {% endif %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}

View File

@@ -1,44 +0,0 @@
thresholding:
sids:
8675309:
- threshold:
gen_id: 1
type: threshold
track: by_src
count: 10
seconds: 10
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 100
seconds: 30
- rate_filter:
gen_id: 1
track: by_rule
count: 50
seconds: 30
new_action: alert
timeout: 30
- suppress:
gen_id: 1
track: by_either
ip: 10.10.3.7
11223344:
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 10
seconds: 10
- rate_filter:
gen_id: 1
track: by_src
count: 50
seconds: 20
new_action: pass
timeout: 60
- suppress:
gen_id: 1
track: by_src
ip: 10.10.3.0/24

View File

@@ -1,20 +0,0 @@
thresholding:
sids:
<signature id>:
- threshold:
gen_id: <generator id>
type: <threshold | limit | both>
track: <by_src | by_dst>
count: <count>
seconds: <seconds>
- rate_filter:
gen_id: <generator id>
track: <by_src | by_dst | by_rule | by_both>
count: <count>
seconds: <seconds>
new_action: <alert | pass>
timeout: <seconds>
- suppress:
gen_id: <generator id>
track: <by_src | by_dst | by_either>
ip: <ip | subnet>

26
pyci.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <python_script_dir>"
echo "Runs tests on all *_test.py files in the given directory."
exit 1
fi
HOME_DIR=$(dirname "$0")
TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"

10
salt/bpf/macros.jinja Normal file
View File

@@ -0,0 +1,10 @@
{% macro remove_comments(bpfmerged, app) %}
{# remove comments from the bpf #}
{% for bpf in bpfmerged[app] %}
{% if bpf.strip().startswith('#') %}
{% do bpfmerged[app].pop(loop.index0) %}
{% endif %}
{% endfor %}
{% endmacro %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %} {% set PCAPBPF = BPFMERGED.pcap %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
{% set SURICATABPF = BPFMERGED.suricata %} {% set SURICATABPF = BPFMERGED.suricata %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
{% set ZEEKBPF = BPFMERGED.zeek %} {% set ZEEKBPF = BPFMERGED.zeek %}

View File

@@ -50,6 +50,12 @@ pki_public_ca_crt:
attempts: 5 attempts: 5
interval: 30 interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms: cakeyperms:
file.managed: file.managed:
- replace: False - replace: False

View File

@@ -8,6 +8,7 @@ include:
- common.packages - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %} {% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state - manager.elasticsearch # needed for elastic_curl_config state
- manager.kibana
{% endif %} {% endif %}
net.core.wmem_default: net.core.wmem_default:

View File

@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a # Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files, # script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file. # and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.8.2" ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
@@ -133,22 +133,37 @@ check_elastic_license() {
} }
check_salt_master_status() { check_salt_master_status() {
local timeout=$1 local count=0
echo "Checking if we can talk to the salt master" local attempts="${1:- 10}"
salt-call state.show_top concurrent=true current_time="$(date '+%b %d %H:%M:%S')"
echo "Checking if we can access the salt master and that it is ready at: ${current_time}"
return while ! salt-call state.show_top -l error concurrent=true 1> /dev/null; do
current_time="$(date '+%b %d %H:%M:%S')"
echo "Can't access salt master or it is not ready at: ${current_time}"
((count+=1))
if [[ $count -eq $attempts ]]; then
# 10 attempts takes about 5.5 minutes
echo "Gave up trying to access salt-master"
return 1
fi
done
current_time="$(date '+%b %d %H:%M:%S')"
echo "Successfully accessed and salt master ready at: ${current_time}"
return 0
} }
# this is only intended to be used to check the status of the minion from a salt master
check_salt_minion_status() { check_salt_minion_status() {
local timeout=$1 local minion="$1"
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 local timeout="${2:-5}"
salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local logfile="${3:-'/dev/stdout'}"
echo "Checking if the salt minion: $minion will respond to jobs" >> "$logfile" 2>&1
salt "$minion" test.ping -t $timeout > /dev/null 2>&1
local status=$? local status=$?
if [ $status -gt 0 ]; then if [ $status -gt 0 ]; then
echo " Minion did not respond" >> "$setup_log" 2>&1 echo " Minion did not respond" >> "$logfile" 2>&1
else else
echo " Received job response from salt minion" >> "$setup_log" 2>&1 echo " Received job response from salt minion" >> "$logfile" 2>&1
fi fi
return $status return $status
@@ -382,6 +397,10 @@ retry() {
echo "<Start of output>" echo "<Start of output>"
echo "$output" echo "$output"
echo "<End of output>" echo "<End of output>"
if [[ $exitcode -eq 0 ]]; then
echo "Forcing exit code to 1"
exitcode=1
fi
fi fi
elif [ -n "$failedOutput" ]; then elif [ -n "$failedOutput" ]; then
if [[ "$output" =~ "$failedOutput" ]]; then if [[ "$output" =~ "$failedOutput" ]]; then
@@ -390,7 +409,7 @@ retry() {
echo "$output" echo "$output"
echo "<End of output>" echo "<End of output>"
if [[ $exitcode -eq 0 ]]; then if [[ $exitcode -eq 0 ]]; then
echo "The exitcode was 0, but we are setting to 1 since we found $failedOutput in the output." echo "Forcing exit code to 1"
exitcode=1 exitcode=1
fi fi
else else
@@ -428,6 +447,24 @@ run_check_net_err() {
fi fi
} }
wait_for_salt_minion() {
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-'/dev/stdout'}"
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local attempt=0
# each attempts would take about 15 seconds
local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
return 1
fi
sleep 10
done
return 0
}
salt_minion_count() { salt_minion_count() {
local MINIONDIR="/opt/so/saltstack/local/pillar/minions" local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l) MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l)
@@ -440,19 +477,51 @@ set_os() {
OS=rocky OS=rocky
OSVER=9 OSVER=9
is_rocky=true is_rocky=true
is_rpm=true
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
OS=centos OS=centos
OSVER=9 OSVER=9
is_centos=true is_centos=true
elif grep -q "Oracle Linux Server release 9" /etc/system-release; then is_rpm=true
OS=oel elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
OS=alma
OSVER=9 OSVER=9
is_oracle=true is_alma=true
is_rpm=true
elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
if [ -f /etc/oracle-release ]; then
OS=oracle
OSVER=9
is_oracle=true
is_rpm=true
else
OS=rhel
OSVER=9
is_rhel=true
is_rpm=true
fi
fi fi
cron_service_name="crond" cron_service_name="crond"
else elif [ -f /etc/os-release ]; then
OS=ubuntu if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
is_ubuntu=true OSVER=focal
UBVER=20.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
OSVER=jammy
UBVER=22.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
OSVER=bookworm
DEBVER=12
is_debian=true
OS=debian
is_deb=true
fi
cron_service_name="cron" cron_service_name="cron"
fi fi
} }
@@ -486,6 +555,10 @@ set_version() {
fi fi
} }
status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}
systemctl_func() { systemctl_func() {
local action=$1 local action=$1
local echo_action=$1 local echo_action=$1

View File

@@ -137,7 +137,7 @@ update_docker_containers() {
for i in "${TRUSTED_CONTAINERS[@]}" for i in "${TRUSTED_CONTAINERS[@]}"
do do
if [ -z "$PROGRESS_CALLBACK" ]; then if [ -z "$PROGRESS_CALLBACK" ]; then
echo "Downloading $i" >> "$LOG_FILE" 2>&1 echo "Downloading $i" >> "$LOG_FILE" 2>&1
else else
$PROGRESS_CALLBACK $i $PROGRESS_CALLBACK $i
fi fi

View File

@@ -114,6 +114,11 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving container" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -136,6 +141,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -157,6 +164,9 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -230,4 +240,4 @@ else
echo -e "\nResult: One or more errors found" echo -e "\nResult: One or more errors found"
fi fi
exit $RESULT exit $RESULT

View File

@@ -1,67 +0,0 @@
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local
zeek_logs_enabled() {
echo "zeeklogs:" > $local_salt_dir/pillar/zeeklogs.sls
echo " enabled:" >> $local_salt_dir/pillar/zeeklogs.sls
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/zeeklogs.sls
done
}
whiptail_manager_adv_service_zeeklogs() {
BLOGS=$(whiptail --title "so-zeek-logs" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
"dce_rpc" "RPC Logs" ON \
"dhcp" "DHCP Logs" ON \
"dnp3" "DNP3 Logs" ON \
"dns" "DNS Logs" ON \
"dpd" "DPD Logs" ON \
"files" "Files Logs" ON \
"ftp" "FTP Logs" ON \
"http" "HTTP Logs" ON \
"intel" "Intel Hits Logs" ON \
"irc" "IRC Chat Logs" ON \
"kerberos" "Kerberos Logs" ON \
"modbus" "MODBUS Logs" ON \
"notice" "Zeek Notice Logs" ON \
"ntlm" "NTLM Logs" ON \
"pe" "PE Logs" ON \
"radius" "Radius Logs" ON \
"rfb" "RFB Logs" ON \
"rdp" "RDP Logs" ON \
"sip" "SIP Logs" ON \
"smb_files" "SMB Files Logs" ON \
"smb_mapping" "SMB Mapping Logs" ON \
"smtp" "SMTP Logs" ON \
"snmp" "SNMP Logs" ON \
"ssh" "SSH Logs" ON \
"ssl" "SSL Logs" ON \
"syslog" "Syslog Logs" ON \
"tunnel" "Tunnel Logs" ON \
"weird" "Zeek Weird Logs" ON \
"mysql" "MySQL Logs" ON \
"socks" "SOCKS Logs" ON \
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
local exitstatus=$?
IFS=' ' read -ra BLOGS <<< "$BLOGS"
return $exitstatus
}
whiptail_manager_adv_service_zeeklogs
return_code=$?
case $return_code in
1)
whiptail --title "so-zeek-logs" --msgbox "Cancelling. No changes have been made." 8 75
;;
255)
whiptail --title "so-zeek-logs" --msgbox "Whiptail error occured, exiting." 8 75
;;
*)
zeek_logs_enabled
;;
esac

View File

@@ -346,7 +346,6 @@ desktop_packages:
- snappy - snappy
- sound-theme-freedesktop - sound-theme-freedesktop
- soundtouch - soundtouch
- securityonion-networkminer
- speech-dispatcher - speech-dispatcher
- speech-dispatcher-espeak-ng - speech-dispatcher-espeak-ng
- speex - speex
@@ -433,6 +432,10 @@ desktop_packages:
- xorg-x11-xinit-session - xorg-x11-xinit-session
- zip - zip
install_networkminer:
pkg.latest:
- name: securityonion-networkminer
{% else %} {% else %}
desktop_packages_os_fail: desktop_packages_os_fail:

View File

@@ -6,6 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# include ssl since docker service requires the intca
include:
- ssl
dockergroup: dockergroup:
group.present: group.present:
@@ -86,6 +89,11 @@ docker_running:
- enable: True - enable: True
- watch: - watch:
- file: docker_daemon - file: docker_daemon
- x509: trusttheca
- require:
- file: docker_daemon
- x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
# 57314 = Strelka, 47760-47860 = Zeek # 57314 = Strelka, 47760-47860 = Zeek

View File

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% set node_data = salt['pillar.get']('node_data') %}
# Add EA Group # Add EA Group
elasticfleetgroup: elasticfleetgroup:
@@ -67,6 +68,7 @@ eapackageupgrade:
- source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade - source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade
- user: 947 - user: 947
- group: 939 - group: 939
- mode: 755
- template: jinja - template: jinja
{% if GLOBALS.role != "so-fleet" %} {% if GLOBALS.role != "so-fleet" %}
@@ -92,13 +94,53 @@ eaintegration:
- user: 947 - user: 947
- group: 939 - group: 939
eaoptionalintegrationsdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional
- user: 947
- group: 939
- makedirs: True
{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% set optional_integrations = salt['pillar.get']('elasticfleet:optional_integrations', {}) %}
{% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %}
fleet_server_integrations_{{ minion }}:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}
- user: 947
- group: 939
- makedirs: True
{% for integration in integration_keys %}
{% if 'enabled_nodes' in optional_integrations[integration]%}
{% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %}
{% if minion in enabled_nodes %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}:
file.managed:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
- source: salt://elasticfleet/files/integrations-optional/{{ integration }}.json
- user: 947
- group: 939
- template: jinja
- defaults:
NAME: {{ minion }}
{% else %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}_delete:
file.absent:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
ea-integrations-load: ea-integrations-load:
file.absent: file.absent:
- name: /opt/so/state/eaintegrations.txt - name: /opt/so/state/eaintegrations.txt
- onchanges: - onchanges:
- file: eaintegration - file: eaintegration
- file: eadynamicintegration - file: eadynamicintegration
- file: eapackageupgrade - file: /opt/so/conf/elastic-fleet/integrations-optional/*
{% endif %} {% endif %}
{% else %} {% else %}

View File

@@ -30,18 +30,26 @@ elasticfleet:
packages: packages:
- apache - apache
- auditd - auditd
- auth0
- aws - aws
- azure - azure
- barracuda - barracuda
- carbonblack_edr
- checkpoint
- cisco_asa - cisco_asa
- cisco_duo
- cisco_meraki
- cisco_umbrella
- cloudflare - cloudflare
- crowdstrike - crowdstrike
- darktrace - darktrace
- elastic_agent
- elasticsearch - elasticsearch
- endpoint - endpoint
- f5_bigip - f5_bigip
- fleet_server
- fim - fim
- fireeye
- fleet_server
- fortinet - fortinet
- fortinet_fortigate - fortinet_fortigate
- gcp - gcp
@@ -57,24 +65,38 @@ elasticfleet:
- m365_defender - m365_defender
- microsoft_defender_endpoint - microsoft_defender_endpoint
- microsoft_dhcp - microsoft_dhcp
- mimecast
- netflow - netflow
- o365 - o365
- okta - okta
- osquery_manager - osquery_manager
- panw - panw
- pfsense - pfsense
- pulse_connect_secure
- redis - redis
- sentinel_one - sentinel_one
- snyk
- sonicwall_firewall - sonicwall_firewall
- sophos
- sophos_central
- symantec_endpoint - symantec_endpoint
- system - system
- tcp - tcp
- tenable_sc
- ti_abusech - ti_abusech
- ti_misp - ti_misp
- ti_otx - ti_otx
- ti_recordedfuture - ti_recordedfuture
- udp - udp
- vsphere
- windows - windows
- zscaler_zia - zscaler_zia
- zscaler_zpa - zscaler_zpa
- 1password - 1password
optional_integrations:
sublime_platform:
enabled_nodes: []
api_key:
base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m
limit: 100

View File

@@ -96,6 +96,17 @@ so-elastic-fleet:
{% endif %} {% endif %}
{% if GLOBALS.role != "so-fleet" %} {% if GLOBALS.role != "so-fleet" %}
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations: so-elastic-fleet-integrations:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load - name: /usr/sbin/so-elastic-fleet-integration-policy-load

View File

@@ -0,0 +1,44 @@
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED -%}
{%- from 'sensoroni/map.jinja' import SENSORONIMERGED -%}
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- raw -%}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "sublime-platform",
"namespace": "default",
"description": "",
"policy_id": "FleetServer_{%- endraw -%}{{ NAME }}{%- raw -%}",
"vars": {},
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"request_method": "GET",
"processors": "- drop_event:\n when:\n not:\n contains: \n message: \"flagged_rules\"\n- decode_json_fields:\n fields: [\"message\"]\n document_id: id\n target: \"\"",
"enable_request_tracer": false,
"oauth_scopes": [],
"request_transforms": "- set:\n target: header.Authorization\n value: 'Bearer {% endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.api_key }}{%- raw -%}'\n- set:\n target: header.accept\n value: application/json\n- set:\n target: url.params.last_message_created_at[gte]\n value: '[[formatDate (now (parseDuration \"-{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.poll_interval }}{%- raw -%}\")) \"2006-01-02T15:04:05Z\"]]'\n- set:\n target: url.params.reviewed\n value: false\n- set:\n target: url.params.flagged\n value: true\n- set:\n target: url.params.limit\n value: {% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.limit }}{%- raw -%}",
"response_transforms": "",
"request_redirect_headers_ban_list": [],
"request_encode_as": "application/x-www-form-urlencoded",
"request_url": "{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.base_url }}{%- raw -%}/v0/message-groups",
"response_split": "target: body.message_groups\ntype: array\nkeep_parent: false\ntransforms:\n - set:\n target: body.sublime.request_url\n value : '[[ .last_response.url.value ]]'",
"tags": [
"forwarded"
],
"pipeline": "sublime",
"data_stream.dataset": "sublime",
"request_interval": "1m"
}
}
}
}
}
}
{%- endraw -%}

View File

@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "8.8.0" "version": "8.10.2"
}, },
"enabled": true, "enabled": true,
"policy_id": "endpoints-initial", "policy_id": "endpoints-initial",

View File

@@ -40,3 +40,36 @@ elasticfleet:
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
sensitive: True sensitive: True
advanced: True advanced: True
optional_integrations:
sublime_platform:
enabled_nodes:
description: Fleet nodes with the Sublime Platform integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"
api_key:
description: API key for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
base_url:
description: Base URL for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for alerts from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
limit:
description: The maximum number of message groups to return from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: int

View File

@@ -68,6 +68,19 @@ elastic_fleet_integration_update() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
} }
# Upgrade a single Fleet integration package policy to the latest available
# package version via the Kibana Fleet API.
#   $1 - package policy ID to upgrade
# Prints the Kibana API response to stdout.
elastic_fleet_integration_policy_upgrade() {
  INTEGRATION_ID=$1
  # Build the request body: {"packagePolicyIds":["<id>"]}
  JSON_STRING=$(jq -n --arg INTEGRATIONID "$INTEGRATION_ID" '{"packagePolicyIds":[$INTEGRATIONID]}')
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
elastic_fleet_package_version_check() { elastic_fleet_package_version_check() {
PACKAGE=$1 PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version' curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'

View File

@@ -14,12 +14,8 @@ do
printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n" printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
if [ "$1" = "--force" ]; then printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
printf "\n\nIntegration $NAME exists - Updating integration\n" elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration $NAME exists - Not updating - rerun with --force to force the update.\n"
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
elastic_fleet_integration_create "@$INTEGRATION" elastic_fleet_integration_create "@$INTEGRATION"

View File

@@ -64,8 +64,28 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
if [[ "$RETURN_CODE" != "1" ]]; then if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt touch /opt/so/state/eaintegrations.txt
fi fi
# Fleet Server - Optional integrations
# For each optional integration JSON shipped for a Fleet Server policy,
# create the integration if it is missing or update it if it already exists.
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
# Guard against the glob not matching anything: bash then yields the literal
# pattern string, which we must skip.
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
# Policy name is the directory component of the path (7th '/'-separated field).
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
# Sets INTEGRATION_ID and NAME as side effects — defined in so-elastic-fleet-common.
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration does not exist - Creating integration\n"
# elasticsearch-logs is intentionally never auto-created here.
if [ "$NAME" != "elasticsearch-logs" ]; then
elastic_fleet_integration_create "@$INTEGRATION"
fi
fi
fi
done
# RETURN_CODE is set earlier in this script; only record successful completion
# so the integration load is retried on the next run after a failure.
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
else else
exit $RETURN_CODE exit $RETURN_CODE
fi fi

View File

@@ -8,8 +8,19 @@
INTCA=/etc/pki/tls/certs/intca.crt INTCA=/etc/pki/tls/certs/intca.crt
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common . /usr/sbin/so-elastic-fleet-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
# wait_for_web_response is provided by so-common (sourced above): it polls the
# Fleet settings endpoint for up to 300 seconds, looking for "fleet" in the
# response, using the authenticated curl config.
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
# Abort Fleet setup entirely if Kibana never became reachable.
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, exiting Elastic Fleet setup..."
exit 1
fi
printf "\n### Create ES Token ###\n" printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value) ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
@@ -120,3 +131,4 @@ salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node # Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0

View File

@@ -20,20 +20,12 @@
{% for NODE in ES_LOGSTASH_NODES %} {% for NODE in ES_LOGSTASH_NODES %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %} {% endfor %}
{% if grains.id.split('_') | last == 'manager' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client','transform']}) %}
{% else %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data_hot', 'remote_cluster_client']}) %}
{% endif %}
{% endif %} {% endif %}
{% elif grains.id.split('_') | last == 'searchnode' %} {% elif grains.id.split('_') | last == 'searchnode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['data_hot', 'ingest']}) %}
{% if HIGHLANDER %} {% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
{% endif %} {% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
{% elif grains.id.split('_') | last == 'heavynode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client', 'ingest']}) %}
{% endif %} {% endif %}
{% if HIGHLANDER %} {% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
@@ -53,3 +45,5 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].config.node.roles}) %}

File diff suppressed because it is too large Load Diff

View File

@@ -110,7 +110,7 @@ escomponenttemplates:
- group: 939 - group: 939
- clean: True - clean: True
- onchanges_in: - onchanges_in:
- cmd: so-elasticsearch-templates - file: so-elasticsearch-templates-reload
# Auto-generate templates from defaults file # Auto-generate templates from defaults file
{% for index, settings in ES_INDEX_SETTINGS.items() %} {% for index, settings in ES_INDEX_SETTINGS.items() %}
@@ -123,7 +123,7 @@ es_index_template_{{index}}:
TEMPLATE_CONFIG: {{ settings.index_template }} TEMPLATE_CONFIG: {{ settings.index_template }}
- template: jinja - template: jinja
- onchanges_in: - onchanges_in:
- cmd: so-elasticsearch-templates - file: so-elasticsearch-templates-reload
{% endif %} {% endif %}
{% endfor %} {% endfor %}
@@ -142,7 +142,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
- user: 930 - user: 930
- group: 939 - group: 939
- onchanges_in: - onchanges_in:
- cmd: so-elasticsearch-templates - file: so-elasticsearch-templates-reload
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -167,6 +167,10 @@ so-elasticsearch-ilm-policy-load:
- onchanges: - onchanges:
- file: so-elasticsearch-ilm-policy-load-script - file: so-elasticsearch-ilm-policy-load-script
so-elasticsearch-templates-reload:
file.absent:
- name: /opt/so/state/estemplates.txt
so-elasticsearch-templates: so-elasticsearch-templates:
cmd.run: cmd.run:
- name: /usr/sbin/so-elasticsearch-templates-load - name: /usr/sbin/so-elasticsearch-templates-load

View File

@@ -0,0 +1,34 @@
{
"description" : " Email alerts from Sublime",
"processors" : [
{ "set": { "field": "event.module", "value": "sublime" } },
{ "set": { "field": "event.dataset", "value": "alert" } },
{ "set": { "field": "event.severity", "value": 3, "override": true } },
{ "set": { "field": "rule.name", "value": "Sublime Platform: {{ flagged_rules.0.name }}", "override": true } },
{ "set": { "field": "sublime.message_group_id", "value": "{{ _id }}", "override": true } },
{ "set": { "field": "email.address", "value": "{{ messages.0.recipients.0.email }}", "override": true } },
{ "set": { "field": "email.forwarded_recipents", "value": "{{ messages.0.forwarded_receipients }}", "override": true } },
{ "set": { "field": "email.sender.address", "value": "{{ messages.0.sender.email }}", "override": true } },
{ "set": { "field": "email.subject", "value": "{{ messages.0.subject }}", "override": true } },
{ "set": { "field": "email.forwarded_at", "value": "{{ messages.0.forwarded_at }}", "override": true } },
{ "set": { "field": "email.created_at", "value": "{{ messages.0.created_at }}", "override": true } },
{ "set": { "field": "email.read_at", "value": "{{ messages.0.read_at }}", "override": true } },
{ "set": { "field": "email.replied_at", "value": "{{ messages.0.replied_at }}", "override": true } },
{
"grok": {
"field": "sublime.request_url",
"patterns": ["^https://api.%{DATA:sublime_host}/v0%{GREEDYDATA}$"],
"ignore_failure": true
}
},
{ "rename": { "field": "sublime_host", "target_field": "sublime.url", "ignore_missing": true } },
{ "rename": { "field": "data", "target_field": "sublime", "ignore_missing": true } },
{ "rename": { "field": "flagged_rules", "target_field": "sublime.flagged_rules", "ignore_missing": true } },
{ "rename": { "field": "organization_id", "target_field": "sublime.organization_id", "ignore_missing": true } },
{ "rename": { "field": "review_status", "target_field": "sublime.review_status", "ignore_missing": true } },
{ "rename": { "field": "state", "target_field": "sublime.state", "ignore_missing": true } },
{ "rename": { "field": "user_reports", "target_field": "sublime.user_reports", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -2,6 +2,7 @@
"description" : "suricata.common", "description" : "suricata.common",
"processors" : [ "processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } }, { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } }, { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } }, { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },

View File

@@ -33,7 +33,6 @@ elasticsearch:
flood_stage: flood_stage:
description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events. description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events.
helpLink: elasticsearch.html helpLink: elasticsearch.html
script: script:
max_compilations_rate: max_compilations_rate:
description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources. description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources.
@@ -57,32 +56,6 @@ elasticsearch:
forcedType: int forcedType: int
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forceType: "[]string"
multiline: True
global: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
helpLink: elasticsearch.html
refresh_interval: refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation. description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True global: True
@@ -100,48 +73,10 @@ elasticsearch:
description: The order to sort by. Must set index_sorting to True. description: The order to sort by. Must set index_sorting to True.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
policy:
phases: phases:
hot: hot:
min_age: max_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier. description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
actions: actions:
@@ -160,10 +95,187 @@ elasticsearch:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
advanced: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forceType: "[]string"
multiline: True
global: True
advanced: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
advanced: True
helpLink: elasticsearch.html
refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True
advanced: True
helpLink: elasticsearch.html
number_of_shards:
description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs.
global: True
advanced: True
helpLink: elasticsearch.html
sort:
field:
description: The field to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
order:
description: The order to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
advanced: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
policy:
phases:
hot:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. This determines when the index should be moved to the warm tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
cold: cold:
min_age: min_age:
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
actions: actions:
set_priority: set_priority:
@@ -171,26 +283,31 @@ elasticsearch:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int forcedType: int
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
delete: delete:
min_age: min_age:
description: Minimum age of index. This determines when the index should be deleted. description: Minimum age of index. This determines when the index should be deleted.
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
_meta: _meta:
package: package:
name: name:
description: Meta settings for the mapping. description: Meta settings for the mapping.
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
managed_by: managed_by:
description: Meta settings for the mapping. description: Meta settings for the mapping.
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
managed: managed:
description: Meta settings for the mapping. description: Meta settings for the mapping.
forcedType: bool forcedType: bool
global: True global: True
advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
so-logs-system_x_auth: *indexSettings so-logs-system_x_auth: *indexSettings
so-logs-system_x_syslog: *indexSettings so-logs-system_x_syslog: *indexSettings
@@ -345,3 +462,19 @@ elasticsearch:
so-strelka: *indexSettings so-strelka: *indexSettings
so-syslog: *indexSettings so-syslog: *indexSettings
so-zeek: *indexSettings so-zeek: *indexSettings
so_roles:
so-manager: &soroleSettings
config:
node:
roles:
description: List of Elasticsearch roles that the node should have. Blank assumes all roles
forcedType: "[]string"
global: False
advanced: True
helpLink: elasticsearch.html
so-managersearch: *soroleSettings
so-standalone: *soroleSettings
so-searchnode: *soroleSettings
so-heavynode: *soroleSettings
so-eval: *soroleSettings
so-import: *soroleSettings

View File

@@ -5,7 +5,7 @@
"name": "logs" "name": "logs"
}, },
"codec": "best_compression", "codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.7.0", "default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": { "mapping": {
"total_fields": { "total_fields": {
"limit": "10000" "limit": "10000"

View File

@@ -6,8 +6,6 @@
. /usr/sbin/so-common . /usr/sbin/so-common
RETURN_CODE=0
ELASTICSEARCH_HOST=$1 ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200 ELASTICSEARCH_PORT=9200
@@ -15,40 +13,20 @@ ELASTICSEARCH_PORT=9200
ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/" ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/"
# Wait for ElasticSearch to initialize # Wait for ElasticSearch to initialize
if [ ! -f /opt/so/state/espipelines.txt ]; then if [ ! -f /opt/so/state/espipelines.txt ]; then
echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsearch-pipelines."
echo -n "Waiting for ElasticSearch..." echo -n "Waiting for ElasticSearch..."
COUNT=0 retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
fi
cd ${ELASTICSEARCH_INGEST_PIPELINES} cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..." echo "Loading pipelines..."
for i in .[a-z]* *; do echo $i; RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done for i in .[a-z]* *;
do
echo $i;
retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i"
done
echo echo
cd - >/dev/null cd - >/dev/null
if [[ "$RETURN_CODE" != "1" ]]; then touch /opt/so/state/espipelines.txt
touch /opt/so/state/espipelines.txt
fi
else
exit $RETURN_CODE
fi fi

View File

@@ -7,103 +7,157 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %} {%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-common STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
{% if GLOBALS.role != 'so-heavynode' %} STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
. /usr/sbin/so-elastic-fleet-common
{% endif %}
default_conf_dir=/opt/so/conf if [[ -f $STATE_FILE_INITIAL ]]; then
# The initial template load has already run. As this is a subsequent load, all dependencies should
# Define a default directory to load pipelines from # already be satisified. Therefore, immediately exit/abort this script upon any template load failure
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/" # since this is an unrecoverable failure.
should_exit_on_failure=1
{% if GLOBALS.role == 'so-heavynode' %}
file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
file="/usr/sbin/so-elastic-fleet-common"
{% endif %}
if [ -f "$file" ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null --silent --head --fail
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
{% endif %}
set -e
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
component_pattern="so-*"
{% else %}
component_pattern="*"
{% endif %}
for i in $component_pattern; do TEMPLATE=${i::-5}; echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %}
pattern="*"
{% endif %}
for i in $pattern; do
TEMPLATE=${i::-14};
echo "$TEMPLATE";
so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null;
echo;
done
echo
else else
{% if GLOBALS.role == 'so-heavynode' %} # This is the initial template load, and there likely are some components not yet setup in Elasticsearch.
echo "Common template does not exist. Exiting..." # Therefore load as many templates as possible at this time and if an error occurs proceed to the next
{% else %} # template. But if at least one template fails to load do not mark the templates as having been loaded.
echo "Elastic Fleet not configured. Exiting..." # This will allow the next load to resume the load of the templates that failed to load initially.
{% endif %} should_exit_on_failure=0
exit 0 echo "This is the initial template load"
fi
load_failures=0
load_template() {
uri=$1
file=$2
echo "Loading template file $i"
if ! retry 3 5 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then
if [[ $should_exit_on_failure -eq 1 ]]; then
fail "Could not load template file: $file"
else
load_failures=$((load_failures+1))
echo "Incremented load failure counter: $load_failures"
fi
fi
}
if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load."
. /usr/sbin/so-common
{% if GLOBALS.role != 'so-heavynode' %}
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
. /usr/sbin/so-elastic-fleet-common
fi
{% endif %}
default_conf_dir=/opt/so/conf
# Define a default directory to load pipelines from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
{% if GLOBALS.role == 'so-heavynode' %}
file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
file="/usr/sbin/so-elastic-fleet-common"
{% endif %}
if [ -f "$file" ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
{% endif %}
touch $STATE_FILE_INITIAL
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1)
load_template "_component_template/${TEMPLATE}-mappings" "$i"
done
echo
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
component_pattern="so-*"
{% else %}
component_pattern="*"
{% endif %}
for i in $component_pattern; do
TEMPLATE=${i::-5}
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1);
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %}
pattern="*"
{% endif %}
# Index templates will be skipped if the following conditions are met:
# 1. The template is part of the "so-logs-" template group
# 2. The template name does not correlate to at least one existing component template
# In this situation, the script will treat the skipped template as a temporary failure
# and allow the templates to be loaded again on the next run or highstate, whichever
# comes first.
COMPONENT_LIST=$(so-elasticsearch-component-templates-list)
for i in $pattern; do
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery)
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
else
load_template "_index_template/$TEMPLATE" "$i"
fi
done
else
{% if GLOBALS.role == 'so-heavynode' %}
echo "Common template does not exist. Exiting..."
{% else %}
echo "Elastic Fleet not configured. Exiting..."
{% endif %}
exit 0
fi
cd - >/dev/null
if [[ $load_failures -eq 0 ]]; then
echo "All template loaded successfully"
touch $STATE_FILE_SUCCESS
else
echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
fi
else
echo "Templates already loaded"
fi fi
cd - >/dev/null

View File

@@ -89,7 +89,6 @@ COMMIT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT -A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate INVALID -j DROP -A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A INPUT -p icmp -j ACCEPT -A INPUT -p icmp -j ACCEPT
-A INPUT -j LOGGING -A INPUT -j LOGGING
-A FORWARD -j DOCKER-USER -A FORWARD -j DOCKER-USER
@@ -103,6 +102,7 @@ COMMIT
-A FORWARD -m conntrack --ctstate INVALID -j DROP -A FORWARD -m conntrack --ctstate INVALID -j DROP
-A FORWARD -j REJECT --reject-with icmp-host-prohibited -A FORWARD -j REJECT --reject-with icmp-host-prohibited
-A OUTPUT -o lo -j ACCEPT -A OUTPUT -o lo -j ACCEPT
# block icmp timestamp reply
-A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP -A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP
{%- for rule in D2 %} {%- for rule in D2 %}

View File

@@ -1,454 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
Local exit codes:
- General error: 1
- Invalid argument: 2
- File error: 3
"""
import sys, os, subprocess, argparse, signal
import copy
import re
import textwrap
import yaml
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
salt_proc: subprocess.CompletedProcess = None
def print_err(string: str):
    """Write the given message to stderr (followed by a newline)."""
    sys.stderr.write(string + '\n')
def check_apply(args: dict, prompt: bool = True):
    """Apply the updated configuration, prompting the user if needed.

    If args.apply was passed on the command line, apply immediately.
    Otherwise, when prompt is True, ask the user (default answer: No).
    Returns the exit code of the apply run, or 0 when nothing is applied.
    """
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply()
    if not prompt:
        return 0
    message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
    answer = input(message)
    # Re-prompt until the user gives a recognized answer (empty means No).
    while answer.lower() not in ('y', 'n', ''):
        answer = input(message)
    if answer.lower() == 'y':
        print('Applying changes:')
        return apply()
    return 0
def apply():
    """Sync idstools config files via salt-call, then run so-rule-update.

    Returns the rule-update exit code, or the salt-call exit code if the
    sync step fails (the update step is skipped in that case).
    """
    sync_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True']
    print('Syncing config files...')
    sync = subprocess.run(sync_cmd, stdout=subprocess.DEVNULL)
    if sync.returncode != 0:
        return sync.returncode
    print('Updating rules...')
    return subprocess.run(['so-rule-update']).returncode
def find_minion_pillar() -> str:
    """Locate the single manager-type minion pillar file and return its path.

    Walks minion_pillar_dir looking for files whose name ends in
    _<role>.sls where <role> is one of the manager-type roles.
    Exits with code 3 if zero or more than one such file is found.
    """
    # BUG FIX: the pattern was a plain string containing "\.", an invalid
    # escape sequence that raises SyntaxWarning on newer Python versions.
    # Use a raw string and compile once up front.
    regex = re.compile(r'^.*_(manager|managersearch|standalone|import|eval)\.sls$')
    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if regex.search(f_minion_id):
                result.append(os.path.join(root, f_minion_id))
    if len(result) == 0:
        print_err('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?')
        sys.exit(3)
    elif len(result) > 1:
        # Multiple manager pillars indicate a corrupted deployment; list them all.
        res_str = ', '.join(f'"{r}"' for r in result)
        print_err('(This should not happen, the system is in an error state if you see this message.)\n')
        print_err('More than one manager-type pillar exists, minion id\'s listed below:')
        print_err(f' {res_str}')
        sys.exit(3)
    else:
        return result[0]
def read_pillar(pillar: str):
    """Load and parse a YAML pillar file, exiting with code 3 on failure.

    BUG FIX: the original bare "except:" also caught the SystemExit raised
    by sys.exit(3) in the empty-file branch, so a parse failure printed a
    misleading "Could not open" message after "Could not parse". Catch only
    the errors open()/safe_load() can actually raise, with distinct messages.
    """
    try:
        with open(pillar, 'r') as f:
            loaded_yaml = yaml.safe_load(f.read())
    except OSError:
        print_err(f'Could not open {pillar}')
        sys.exit(3)
    except yaml.YAMLError:
        print_err(f'Could not parse {pillar}')
        sys.exit(3)
    if loaded_yaml is None:
        # An empty (or comment-only) file parses to None, which is unusable here.
        print_err(f'Could not parse {pillar}')
        sys.exit(3)
    return loaded_yaml
def write_pillar(pillar: str, content: dict):
    """Serialize the pillar content back to disk as YAML.

    Empty sid lists are normalized to None (rendered as YAML null) so the
    pillar file keeps the form idstools expects. Exits with code 3 on failure.
    """
    try:
        sids = content['idstools']['sids']
        # Collapse empty lists to None so they serialize as "key: null".
        for key in ('disabled', 'enabled', 'modify'):
            if sids.get(key) is not None and len(sids[key]) == 0:
                sids[key] = None
        with open(pillar, 'w') as f:
            return yaml.dump(content, f, default_flow_style=False)
    except Exception as e:
        # BUG FIX: the caught exception was previously discarded and every
        # failure (missing 'idstools' keys, permission errors, dump errors)
        # reported only "Could not open". Surface the underlying error.
        print_err(f'Could not write {pillar}: {e}')
        sys.exit(3)
def check_sid_pattern(sid_pattern: str):
    """Validate a SID pattern.

    Accepts either a non-negative integer SID (e.g. "4321") or a regular
    expression prefixed with "re:" (e.g. "re:heartbleed|spectre").
    Prints an error and returns False for anything else.
    """
    message = f'SID {sid_pattern} is not valid, did you forget the "re:" prefix for a regex pattern?'
    if sid_pattern.startswith('re:'):
        if valid_regex(sid_pattern[3:]):
            return True
        print_err('Invalid regex pattern.')
        return False
    try:
        sid = int(sid_pattern)
    except Exception:
        print_err(message)
        return False
    if sid < 0:
        print_err(message)
        return False
    return True
def valid_regex(pattern: str):
    """Return True if pattern compiles as a regular expression, else False."""
    try:
        re.compile(pattern)
    except re.error:
        return False
    return True
def sids_key_exists(pillar: dict, key: str):
    """Return True when idstools:sids in the pillar contains the given key."""
    sids = pillar.get('idstools', {}).get('sids', {})
    return key in sids
def rem_from_sids(pillar: dict, key: str, val: str, optional = False):
    """Return a deep copy of pillar with val removed from idstools:sids:<key>.

    The input pillar is never mutated. If val is not present, the copy is
    returned unchanged; a notice is printed unless optional is True.
    """
    result = copy.deepcopy(pillar)
    entries = result['idstools']['sids'][key]
    if entries is not None and val in entries:
        entries.remove(val)
    elif not optional:
        print(f'{val} already does not exist in {key}')
    return result
def add_to_sids(pillar: dict, key: str, val: str, optional = False):
    """Return a deep copy of pillar with val appended to idstools:sids:<key>.

    The input pillar is never mutated. A None list is replaced with a new
    empty list first. If val is already present, the copy is returned
    unchanged; a notice is printed unless optional is True.
    """
    result = copy.deepcopy(pillar)
    sids = result['idstools']['sids']
    if sids[key] is None:
        sids[key] = []
    if val not in sids[key]:
        sids[key].append(val)
    elif not optional:
        print(f'{val} already exists in {key}')
    return result
def add_rem_disabled(args: dict):
    """Add (or remove, when args.remove is set) a SID/pattern in the
    'disabled' sids list, then optionally apply the configuration.

    When disabling a rule, the pattern is also removed from the 'enabled'
    list and the user is prompted to drop any modify actions that target it.
    Returns 2 on invalid input, otherwise the check_apply() exit code.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    pillar_dict = read_pillar(args.pillar)
    # Ensure the 'disabled' key exists so add/remove helpers can index it.
    if not sids_key_exists(pillar_dict, 'disabled'):
        pillar_dict['idstools']['sids']['disabled'] = None
    if args.remove:
        temp_pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern)
    else:
        temp_pillar_dict = add_to_sids(pillar_dict, 'disabled', args.sid_pattern)
    # No effective change: skip the write and only apply when --apply was given.
    if temp_pillar_dict['idstools']['sids']['disabled'] == pillar_dict['idstools']['sids']['disabled']:
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    else:
        pillar_dict = temp_pillar_dict
    if not args.remove:
        # Disabling a rule implies it should no longer be explicitly enabled.
        if sids_key_exists(pillar_dict, 'enabled'):
            pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern, optional=True)
        modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
        if modify is not None:
            # Collect modify actions that start with this exact pattern followed
            # by a space (the "<pattern> "search" "replace"" storage format).
            rem_candidates = []
            for action in modify:
                if action.startswith(f'{args.sid_pattern} '):
                    rem_candidates.append(action)
            if len(rem_candidates) > 0:
                for item in rem_candidates:
                    print(f' - {item}')
                # Re-prompt until a recognized answer; empty input means Yes here.
                answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
                while answer.lower() not in [ 'y', 'n', '' ]:
                    for item in rem_candidates:
                        print(f' - {item}')
                    answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
                if answer.lower() in [ 'y', '' ]:
                    for item in rem_candidates:
                        modify.remove(item)
                    pillar_dict['idstools']['sids']['modify'] = modify
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_disabled_rules(args: dict):
    """Print all disabled rule patterns from the pillar; always returns 0."""
    disabled = read_pillar(args.pillar).get('idstools', {}).get('sids', {}).get('disabled')
    if disabled is None:
        print('No rules disabled.')
        return 0
    print('Disabled rules:')
    for rule in disabled:
        print(f' - {rule}')
    return 0
def add_rem_enabled(args: dict):
    """Add (or remove, when args.remove is set) a SID/pattern in the
    'enabled' sids list, then optionally apply the configuration.

    Enabling a rule also drops it from the 'disabled' list. Returns 2 on
    invalid input, otherwise the check_apply() exit code.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    pillar_dict = read_pillar(args.pillar)
    # Ensure the 'enabled' key exists so add/remove helpers can index it.
    if not sids_key_exists(pillar_dict, 'enabled'):
        pillar_dict['idstools']['sids']['enabled'] = None
    mutate = rem_from_sids if args.remove else add_to_sids
    updated = mutate(pillar_dict, 'enabled', args.sid_pattern)
    if updated['idstools']['sids']['enabled'] == pillar_dict['idstools']['sids']['enabled']:
        # No effective change: only apply when --apply was given (no prompt).
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    pillar_dict = updated
    if not args.remove and sids_key_exists(pillar_dict, 'disabled'):
        # Enabling a rule implies it should no longer be disabled.
        pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_enabled_rules(args: dict):
    """Print all explicitly enabled rule patterns from the pillar; always returns 0."""
    enabled = read_pillar(args.pillar).get('idstools', {}).get('sids', {}).get('enabled')
    if enabled is None:
        print('No rules explicitly enabled.')
        return 0
    print('Enabled rules:')
    for rule in enabled:
        print(f' - {rule}')
    return 0
def add_rem_modify(args: dict):
    """Add (or remove, when args.remove is set) a modify action of the form
    '<sid_pattern> "<search>" "<replace>"' in the 'modify' sids list, then
    optionally apply the configuration.

    Returns 2 on invalid input, otherwise the check_apply() exit code.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    if not valid_regex(args.search_term):
        print_err('Search term is not a valid regex pattern.')
        # BUG FIX: previously fell through after printing the error, so the
        # invalid search term was written to the pillar anyway.
        return 2
    string_val = f'{args.sid_pattern} \"{args.search_term}\" \"{args.replace_term}\"'
    pillar_dict = read_pillar(args.pillar)
    # Ensure the 'modify' key exists so add/remove helpers can index it.
    if not sids_key_exists(pillar_dict, 'modify'):
        pillar_dict['idstools']['sids']['modify'] = None
    if args.remove:
        temp_pillar_dict = rem_from_sids(pillar_dict, 'modify', string_val)
    else:
        temp_pillar_dict = add_to_sids(pillar_dict, 'modify', string_val)
    if temp_pillar_dict['idstools']['sids']['modify'] == pillar_dict['idstools']['sids']['modify']:
        # No effective change: only apply when --apply was given (no prompt).
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    else:
        pillar_dict = temp_pillar_dict
    # TODO: Determine if a rule should be removed from disabled if modified.
    if not args.remove:
        if sids_key_exists(pillar_dict, 'disabled'):
            # Modifying a rule implies it should no longer be disabled.
            pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_modified_rules(args: dict):
    """Print all modify actions from the pillar; always returns 0."""
    modify = read_pillar(args.pillar).get('idstools', {}).get('sids', {}).get('modify')
    if modify is None:
        print('No rules currently modified.')
        return 0
    print('Modified rules + modifications:')
    for rule in modify:
        print(f' - {rule}')
    return 0
def sigint_handler(*_):
    """Handle Ctrl-C: forward SIGINT to a child process if one exists, then exit 0.

    BUG FIX: salt_proc is only ever assigned the integer return codes of
    check_apply() in this script (and even a subprocess.CompletedProcess has
    no send_signal method — only Popen does), so the unconditional
    salt_proc.send_signal() call raised AttributeError on Ctrl-C. Guard on
    the attribute so the forward only happens for a real process handle.
    """
    print('Exiting gracefully on Ctrl-C')
    if salt_proc is not None and hasattr(salt_proc, 'send_signal'):
        salt_proc.send_signal(signal.SIGINT)
    sys.exit(0)
def main():
    """Entry point: build the disabled/enabled/modify CLI and dispatch.

    Must be run as root (exit 1 otherwise). Each subcommand's add/remove/list
    action is bound via set_defaults(func=...); after parsing, the manager
    pillar path is resolved and attached to args before dispatch.
    """
    # Install the Ctrl-C handler before any long-running work starts.
    signal.signal(signal.SIGINT, sigint_handler)
    if os.geteuid() != 0:
        print_err('You must run this script as root')
        sys.exit(1)
    apply_help='After updating rule configuration, apply the idstools state.'
    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    subcommand_desc = textwrap.dedent(
        """\
        disabled Manage and list disabled rules (add, remove, list)
        enabled Manage and list enabled rules (add, remove, list)
        modify Manage and list modified rules (add, remove, list)
        """
    )
    # metavar='' hides the auto-generated {disabled,enabled,modify} line;
    # dest='command' lets us print the right sub-help when no action is given.
    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
    sid_or_regex_help = 'A valid SID (ex: "4321") or regular expression pattern (ex: "re:heartbleed|spectre")'
    # Disabled actions
    disabled = subparsers.add_parser('disabled')
    disabled_sub = disabled.add_subparsers()
    disabled_add = disabled_sub.add_parser('add')
    disabled_add.set_defaults(func=add_rem_disabled)
    disabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    disabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    disabled_rem = disabled_sub.add_parser('remove')
    disabled_rem.set_defaults(func=add_rem_disabled, remove=True)
    disabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    disabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    disabled_list = disabled_sub.add_parser('list')
    disabled_list.set_defaults(func=list_disabled_rules)
    # Enabled actions
    enabled = subparsers.add_parser('enabled')
    enabled_sub = enabled.add_subparsers()
    enabled_add = enabled_sub.add_parser('add')
    enabled_add.set_defaults(func=add_rem_enabled)
    enabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    enabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    enabled_rem = enabled_sub.add_parser('remove')
    enabled_rem.set_defaults(func=add_rem_enabled, remove=True)
    enabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    enabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    enabled_list = enabled_sub.add_parser('list')
    enabled_list.set_defaults(func=list_enabled_rules)
    search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")'
    replace_term_help='The text to replace the search term with'
    # Modify actions
    modify = subparsers.add_parser('modify')
    modify_sub = modify.add_subparsers()
    modify_add = modify_sub.add_parser('add')
    modify_add.set_defaults(func=add_rem_modify)
    modify_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    modify_add.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
    modify_add.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
    modify_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    modify_rem = modify_sub.add_parser('remove')
    modify_rem.set_defaults(func=add_rem_modify, remove=True)
    modify_rem.add_argument('sid_pattern', metavar='SID', help=sid_or_regex_help)
    modify_rem.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
    modify_rem.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
    modify_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    modify_list = modify_sub.add_parser('list')
    modify_list.set_defaults(func=list_modified_rules)
    # Begin parse + run
    args = main_parser.parse_args(sys.argv[1:])
    # Only the 'remove' subparsers set args.remove; default it for add/list.
    if not hasattr(args, 'remove'):
        args.remove = False
    args.pillar = find_minion_pillar()
    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        # No action given: show the most specific help available, then exit 0.
        if args.command is None:
            main_parser.print_help()
        else:
            if args.command == 'disabled':
                disabled.print_help()
            elif args.command == 'enabled':
                enabled.print_help()
            elif args.command == 'modify':
                modify.print_help()
        sys.exit(0)
    sys.exit(exit_code)
if __name__ == '__main__':
main()

View File

@@ -22,7 +22,7 @@ so-influxdb:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }} - ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment: - environment:
- INFLUXD_CONFIG_PATH=/conf - INFLUXD_CONFIG_PATH=/conf/config.yaml
- INFLUXDB_HTTP_LOG_ENABLED=false - INFLUXDB_HTTP_LOG_ENABLED=false
- DOCKER_INFLUXDB_INIT_MODE=setup - DOCKER_INFLUXDB_INIT_MODE=setup
- DOCKER_INFLUXDB_INIT_USERNAME=so - DOCKER_INFLUXDB_INIT_USERNAME=so

View File

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} {"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

View File

@@ -0,0 +1 @@
user = "{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user', 'NO_USER_SET') }}:{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass', 'NO_PW_SET') }}"

View File

@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))' IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done done

View File

@@ -51,6 +51,14 @@ kratosschema:
- group: 928 - group: 928
- mode: 600 - mode: 600
kratosoidc:
file.managed:
- name: /opt/so/conf/kratos/oidc.jsonnet
- source: salt://kratos/files/oidc.jsonnet
- user: 928
- group: 928
- mode: 600
kratosconfig: kratosconfig:
file.managed: file.managed:
- name: /opt/so/conf/kratos/kratos.yaml - name: /opt/so/conf/kratos/kratos.yaml

View File

@@ -1,5 +1,18 @@
kratos: kratos:
enabled: False enabled: False
oidc:
enabled: false
config:
id: SSO
mapper_url: file:///kratos-conf/oidc.jsonnet
subject_source: userinfo
scope:
- email
- profile
requested_claims:
id_token:
email:
essential: true
config: config:
session: session:
lifespan: 24h lifespan: 24h

View File

@@ -21,8 +21,7 @@ so-kratos:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKER.containers['so-kratos'].ip }} - ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
- binds: - binds:
- /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro - /opt/so/conf/kratos/:/kratos-conf:ro
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro
- /opt/so/log/kratos/:/kratos-log:rw - /opt/so/log/kratos/:/kratos-log:rw
- /nsm/kratos/db:/kratos-data:rw - /nsm/kratos/db:/kratos-data:rw
{% if DOCKER.containers['so-kratos'].custom_bind_mounts %} {% if DOCKER.containers['so-kratos'].custom_bind_mounts %}

View File

@@ -0,0 +1,8 @@
local claims = std.extVar('claims');
{
identity: {
traits: {
email: if 'email' in claims then claims.email else claims.preferred_username
},
},
}

View File

@@ -20,3 +20,19 @@
{% do KRATOSDEFAULTS.kratos.config.courier.smtp.update({'connection_uri': KRATOSDEFAULTS.kratos.config.courier.smtp.connection_uri | replace("URL_BASE", GLOBALS.url_base)}) %} {% do KRATOSDEFAULTS.kratos.config.courier.smtp.update({'connection_uri': KRATOSDEFAULTS.kratos.config.courier.smtp.connection_uri | replace("URL_BASE", GLOBALS.url_base)}) %}
{% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %} {% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %}
{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %}
{% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %}
{% endif %}
{% if KRATOSMERGED.oidc.config.auth_url is defined and not KRATOSMERGED.oidc.config.auth_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('auth_url') %}
{% endif %}
{% if KRATOSMERGED.oidc.config.issuer_url is defined and not KRATOSMERGED.oidc.config.issuer_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('issuer_url') %}
{% endif %}
{% if KRATOSMERGED.oidc.config.token_url is defined and not KRATOSMERGED.oidc.config.token_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('token_url') %}
{% endif %}

View File

@@ -3,6 +3,90 @@ kratos:
description: You can enable or disable Kratos. description: You can enable or disable Kratos.
advanced: True advanced: True
helpLink: kratos.html helpLink: kratos.html
oidc:
enabled:
description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.
global: True
helpLink: oidc.html
config:
id:
description: Customize the OIDC provider name. This name appears on the login page. Required.
global: True
forcedType: string
helpLink: oidc.html
provider:
description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft"
global: True
forcedType: string
regex: "auth0|generic|github|google|microsoft"
regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft"
helpLink: oidc.html
client_id:
description: Specify the client ID, also referenced as the application ID. Required.
global: True
forcedType: string
helpLink: oidc.html
client_secret:
description: Specify the client secret. Required.
global: True
forcedType: string
helpLink: oidc.html
microsoft_tenant:
description: Specify the Microsoft Active Directory Tenant ID. Required when provider is 'microsoft'.
global: True
forcedType: string
helpLink: oidc.html
subject_source:
description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'.
global: True
forcedType: string
regex: me|userinfo
regexFailureMessage: "Valid values are: me, userinfo"
helpLink: oidc.html
auth_url:
description: Provider's auth URL. Required when provider is 'generic'.
global: True
forcedType: string
helpLink: oidc.html
issuer_url:
description: Provider's issuer URL. Required when provider is 'auth0' or 'generic'.
global: True
forcedType: string
helpLink: oidc.html
mapper_url:
description: A file path or URL in Jsonnet format, used to map OIDC claims to the Kratos schema. Defaults to an included file that maps the email claim. Note that the contents of the included file can be customized via the "OIDC Claims Mapping" setting.
advanced: True
global: True
forcedType: string
helpLink: oidc.html
token_url:
description: Provider's token URL. Required when provider is 'generic'.
global: True
forcedType: string
helpLink: oidc.html
scope:
description: List of scoped data categories to request in the authentication response. Typically 'email' and 'profile' are the minimum required scopes. However, GitHub requires 'user:email' instead, and Auth0 requires 'profile', 'email', and 'openid'.
global: True
forcedType: "[]string"
helpLink: oidc.html
requested_claims:
id_token:
email:
essential:
description: Specifies whether the email claim is necessary. Typically leave this value set to true.
advanced: True
global: True
helpLink: oidc.html
files:
oidc__jsonnet:
title: OIDC Claims Mapping
description: Customize the OIDC claim mappings to the Kratos schema. The default mappings include the minimum required for login functionality, so this typically does not need to be customized. Visit https://jsonnet.org for more information about this file format.
advanced: True
file: True
global: True
helpLink: oidc.html
config: config:
session: session:
lifespan: lifespan:
@@ -19,10 +103,10 @@ kratos:
methods: methods:
password: password:
enabled: enabled:
description: Set to True to enable traditional password authentication. Leave as default to ensure proper security protections remain in place. description: Set to True to enable traditional password authentication to SOC. Typically set to true, except when exclusively using OIDC authentication. Some external tool interfaces may not be accessible if local password authentication is disabled.
global: True global: True
advanced: True advanced: True
helpLink: kratos.html helpLink: oidc.html
config: config:
haveibeenpwned_enabled: haveibeenpwned_enabled:
description: Set to True to check if a newly chosen password has ever been found in a published list of previously-compromised passwords. Requires outbound Internet connectivity when enabled. description: Set to True to check if a newly chosen password has ever been found in a published list of previously-compromised passwords. Requires outbound Internet connectivity when enabled.
@@ -30,7 +114,7 @@ kratos:
helpLink: kratos.html helpLink: kratos.html
totp: totp:
enabled: enabled:
description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA). Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in. description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA) to SOC. Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in.
global: True global: True
helpLink: kratos.html helpLink: kratos.html
config: config:
@@ -41,7 +125,7 @@ kratos:
helpLink: kratos.html helpLink: kratos.html
webauthn: webauthn:
enabled: enabled:
description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) logins. Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in. description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) SOC logins. Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in.
global: True global: True
helpLink: kratos.html helpLink: kratos.html
config: config:
@@ -65,6 +149,7 @@ kratos:
global: True global: True
advanced: True advanced: True
helpLink: kratos.html helpLink: kratos.html
flows: flows:
settings: settings:
privileged_session_max_age: privileged_session_max_age:

View File

@@ -11,7 +11,10 @@ input {
} }
} }
filter { filter {
if ![metadata] {
mutate { mutate {
rename => {"@metadata" => "metadata"} rename => {"@metadata" => "metadata"}
} }
} }
}

View File

@@ -13,10 +13,11 @@ input {
filter { filter {
if "fleet-lumberjack-input" in [tags] { if ![metadata] {
mutate { mutate {
rename => {"@metadata" => "metadata"} rename => {"@metadata" => "metadata"}
} }
} }
} }

View File

@@ -1,13 +1,16 @@
output { output {
if "elastic-agent" in [tags] { if "elastic-agent" in [tags] {
if [metadata][pipeline] { if [metadata][pipeline] {
if [metadata][_id] {
elasticsearch { elasticsearch {
hosts => "{{ GLOBALS.manager }}" hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8 ecs_compatibility => v8
data_stream => true data_stream => true
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}" pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"]
ssl => true ssl => true
ssl_certificate_verification => false ssl_certificate_verification => false
} }
@@ -19,10 +22,22 @@ output {
data_stream => true data_stream => true
user => "{{ ES_USER }}" user => "{{ ES_USER }}"
password => "{{ ES_PASS }}" password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}"
ssl => true ssl => true
ssl_certificate_verification => false ssl_certificate_verification => false
} }
} }
}
else {
elasticsearch {
hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
ssl => true
ssl_certificate_verification => false
}
}
} }
} }

View File

@@ -16,6 +16,7 @@ include:
- kibana.secrets - kibana.secrets
- manager.sync_es_users - manager.sync_es_users
- manager.elasticsearch - manager.elasticsearch
- manager.kibana
repo_log_dir: repo_log_dir:
file.directory: file.directory:
@@ -60,6 +61,8 @@ manager_sbin:
- user: 939 - user: 939
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
- exclude_pat:
- "*_test.py"
yara_update_scripts: yara_update_scripts:
file.recurse: file.recurse:

8
salt/manager/kibana.sls Normal file
View File

@@ -0,0 +1,8 @@
kibana_curl_config_distributed:
file.managed:
- name: /opt/so/conf/kibana/curl.config
- source: salt://kibana/files/curl.config.template
- template: jinja
- mode: 600
- show_changes: False
- makedirs: True

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst

View File

@@ -1,19 +1,9 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# Copyright 2014-2023 Security Onion Solutions, LLC # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# This program is free software: you can redistribute it and/or modify # https://securityonion.net/license; you may not use this file except in compliance with the
# it under the terms of the GNU General Public License as published by # Elastic License 2.0.
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os import os
import subprocess import subprocess

View File

@@ -406,12 +406,17 @@ function update_logstash_outputs() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
} }
function checkMine() {
local func=$1
# make sure the minion sees itself in the mine since it needs to see itself for states as opposed to using salt-run
retry 20 1 "salt '$MINION_ID' mine.get '\*' '$func'" "$MINION_ID"
}
function updateMine() { function updateMine() {
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC" retry 20 1 "salt '$MINION_ID' mine.update" True
}
function apply_ES_state() {
salt-call state.apply elasticsearch concurrent=True
} }
function createEVAL() { function createEVAL() {
is_pcaplimit=true is_pcaplimit=true
add_elasticsearch_to_minion add_elasticsearch_to_minion
@@ -547,8 +552,6 @@ function createSEARCHNODE() {
add_elasticsearch_to_minion add_elasticsearch_to_minion
add_logstash_to_minion add_logstash_to_minion
add_telegraf_to_minion add_telegraf_to_minion
updateMine
apply_ES_state
} }
function createRECEIVER() { function createRECEIVER() {
@@ -563,6 +566,19 @@ function createDESKTOP() {
} }
function testConnection() { function testConnection() {
# the minion should be trying to auth every 10 seconds so 15 seconds should be more than enough time to see this in the log
# this retry was put in because it is possible that a minion is attempted to be pinged before it has authenticated and connected to the Salt master
# causing the first ping to fail and typically wouldn't be successful until the second ping
# this check may pass without the minion being authenticated if it was previously connected and the line exists in the log
retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master"
local retauth=$?
if [[ $retauth != 0 ]]; then
echo "The Minion did not authenticate with the Salt master in the allotted time"
echo "Deleting the key"
deleteminion
exit 1
fi
retry 15 3 "salt '$MINION_ID' test.ping" True retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$? local ret=$?
if [[ $ret != 0 ]]; then if [[ $ret != 0 ]]; then
@@ -582,9 +598,9 @@ if [[ "$OPERATION" = 'delete' ]]; then
deleteminion deleteminion
fi fi
if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# Skip this if its setup # Skip this if its setup
if [ $OPERATION != 'setup' ]; then if [[ $OPERATION == 'add' ]]; then
# Accept the salt key # Accept the salt key
acceptminion acceptminion
# Test to see if the minion was accepted # Test to see if the minion was accepted
@@ -605,8 +621,26 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
else else
add_sensoroni_to_minion add_sensoroni_to_minion
fi fi
create$NODETYPE create$NODETYPE
echo "Minion file created for $MINION_ID" echo "Minion file created for $MINION_ID"
if [[ "$OPERATION" == 'add' ]]; then
# tell the minion to populate the mine with data from mine_functions which is populated during setup
# this only needs to happen on non managers since they handle this during setup
# and they need to wait for ca creation to update the mine
updateMine
checkMine "network.ip_addrs"
# apply the elasticsearch state to the manager if a new searchnode was added
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
# calls so-common and set_minionid sets MINIONID to local minion id
set_minionid
salt $MINIONID state.apply elasticsearch queue=True --async
salt $MINIONID state.apply soc queue=True --async
fi
# run this async so the cli doesn't wait for a return
salt "$MINION_ID" state.highstate --async queue=True
fi
fi fi
if [[ "$OPERATION" = 'test' ]]; then if [[ "$OPERATION" = 'test' ]]; then

View File

@@ -235,8 +235,8 @@ function updatePassword() {
# Update DB with new hash # Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
# Deactivate MFA # Deactivate MFA
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password" [[ $? != 0 ]] && fail "Unable to update password"
fi fi
} }
@@ -341,14 +341,19 @@ function syncElastic() {
" and ic.identity_id=i.id " \ " and ic.identity_id=i.id " \
" and ict.id=ic.identity_credential_type_id " \ " and ict.id=ic.identity_credential_type_id " \
" and ict.name='password' " \ " and ict.name='password' " \
" and instr(ic.config, 'hashed_password') " \
" and i.state == 'active' " \ " and i.state == 'active' " \
"order by ici.identifier;" | \ "order by ici.identifier;" | \
sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath") sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath")
[[ $? != 0 ]] && fail "Unable to read credential hashes from database" [[ $? != 0 ]] && fail "Unable to read credential hashes from database"
echo "${userData}" | \
jq -r '.user + ":" + .data.hashed_password' \ user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
>> "$usersTmpFile" if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then
# generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")
fi
echo "${user_data_formatted}" >> "$usersTmpFile"
# Append the user roles # Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do while IFS="" read -r rolePair || [ -n "$rolePair" ]; do

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import sys
import time
import yaml
lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
    """Print command-line usage information to stdout and exit with code 1.

    Args:
        args: Ignored; accepted so this function can be used as a command
            handler in the dispatch table.
    """
    usage_lines = [
        'Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]),
        ' General commands:',
        ' remove - Removes a yaml top-level key, if it exists. Requires KEY arg.',
        ' help - Prints this usage information.',
        '',
        ' Where:',
        ' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml',
        ' KEY - Top level key only, does not support dot-notations for nested keys at this time. Ex: level1',
    ]
    # One print of the joined lines emits exactly the same bytes as printing
    # each line individually.
    print('\n'.join(usage_lines))
    sys.exit(1)
def loadYaml(filename):
    """Read the given file and parse its contents as YAML.

    Args:
        filename: Path to the YAML file to read.

    Returns:
        The parsed YAML document (typically a dict), or None if the file is
        empty.
    """
    # Use a context manager so the file handle is closed even if parsing
    # raises; the original left the handle open until garbage collection.
    with open(filename, "r") as file:
        content = file.read()
    return yaml.safe_load(content)
def writeYaml(filename, content):
    """Serialize content as YAML and write it to the given file, replacing
    any existing contents.

    Args:
        filename: Path of the destination file.
        content: Object to serialize (typically a dict).

    Returns:
        The return value of yaml.dump (None when dumping to a stream).
    """
    # Use a context manager so the data is flushed and the handle closed
    # deterministically; the original relied on GC/interpreter shutdown.
    with open(filename, "w") as file:
        return yaml.dump(content, file)
def remove(args):
    """Remove a top-level key from a YAML file, if it exists.

    Args:
        args: Two-element list: [path to YAML file, top-level key to remove].

    Returns:
        0 on success. With missing arguments this delegates to showUsage(),
        which exits the process; the bare return after it is only reached in
        tests that stub sys.exit.
    """
    if len(args) != 2:
        print('Missing filename or key arg', file=sys.stderr)
        showUsage(None)
        return
    filename = args[0]
    content = loadYaml(filename)
    # An empty YAML file parses to None; there is nothing to remove in that
    # case (the original crashed with AttributeError on None.pop).
    if content is None:
        return 0
    content.pop(args[1], None)
    writeYaml(filename, content)
    return 0
def main():
    """Entry point: parse the command from argv, serialize file access via a
    lock file, dispatch to the command handler, and exit with its return code.
    """
    args = sys.argv[1:]
    if len(args) < 1:
        showUsage(None)
        return

    # Supported commands; anything unrecognized falls back to showUsage.
    commands = {
        "help": showUsage,
        "remove": remove,
    }

    code = 1
    acquired = False  # True only if this process created the lock file
    try:
        lockAttempts = 0
        maxAttempts = 30
        while lockAttempts < maxAttempts:
            lockAttempts = lockAttempts + 1
            try:
                # Open mode "x" fails if the file already exists, making
                # creation an atomic lock acquisition.
                f = open(lockFile, "x")
                f.close()
                acquired = True
                break
            except Exception:
                if lockAttempts == 1:
                    print("Waiting for lock file to be released from another process...")
                time.sleep(2)
        # Only report failure when the lock truly was not acquired; the
        # original printed this even when the final attempt succeeded.
        if not acquired and lockAttempts == maxAttempts:
            print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")
        cmd = commands.get(args[0], showUsage)
        code = cmd(args[1:])
    finally:
        # Only clean up a lock this process owns. The original removed the
        # lock file unconditionally, which deleted another process's lock
        # whenever acquisition timed out, defeating the serialization.
        # NOTE(review): a stale lock left by a crashed process now requires
        # manual removal — confirm this tradeoff is acceptable.
        if acquired and os.path.exists(lockFile):
            os.remove(lockFile)
    sys.exit(code)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,77 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import importlib
soyaml = importlib.import_module("so-yaml")
class TestRemove(unittest.TestCase):
    """Tests for the so-yaml helper: usage handling, lock-file waiting, and
    the 'remove' command."""

    def test_main_missing_input(self):
        # With no command argument main() should print usage and exit(1).
        # Usage text is written to stdout by showUsage(), so patch stdout
        # (the original patched stderr and passed reversed args to assertIn,
        # making the check vacuous).
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd"]
                soyaml.main()
                sysmock.assert_called_once_with(1)
                self.assertIn("Usage:", mock_stdout.getvalue())

    def test_main_help_locked(self):
        # Simulate another process holding the lock: main() should sleep
        # between attempts and eventually proceed without the lock.
        import os
        filename = "/tmp/so-yaml.lock"
        file = open(filename, "w")
        # The original assigned a string over the write method instead of
        # calling it, leaving the file empty and the handle unclosed.
        file.write("fake lock file")
        file.close()
        try:
            with patch('sys.exit', new=MagicMock()) as sysmock:
                with patch('sys.stdout', new=StringIO()) as mock_stdout:
                    with patch('time.sleep', new=MagicMock()) as mock_sleep:
                        sys.argv = ["cmd", "help"]
                        soyaml.main()
                        sysmock.assert_called()
                        mock_sleep.assert_called_with(2)
                        self.assertIn("Usage:", mock_stdout.getvalue())
        finally:
            # Clean up the fake lock so later tests and real runs don't block.
            if os.path.exists(filename):
                os.remove(filename)

    def test_main_help(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.main()
                sysmock.assert_called()
                self.assertIn("Usage:", mock_stdout.getvalue())

    def test_remove(self):
        # Removing a top-level key rewrites the file without that key.
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false}")
        file.close()

        soyaml.remove([filename, "key1"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key2: false\n"
        self.assertEqual(actual, expected)

    def test_remove_missing_args(self):
        # With too few args, remove() reports the error on stderr, shows
        # usage, and leaves the file untouched.
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                filename = "/tmp/so-yaml_test-remove.yaml"
                file = open(filename, "w")
                file.write("{key1: { child1: 123, child2: abc }, key2: false}")
                file.close()

                soyaml.remove([filename])

                file = open(filename, "r")
                actual = file.read()
                file.close()

                expected = "{key1: { child1: 123, child2: abc }, key2: false}"
                self.assertEqual(actual, expected)
                sysmock.assert_called_once_with(1)
                self.assertIn("Missing filename or key arg", mock_stderr.getvalue())

View File

@@ -403,6 +403,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5 [[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
[[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10 [[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20 [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
true true
} }
@@ -414,7 +415,8 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4 [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5 [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10 [[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20 [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
true true
} }
@@ -429,8 +431,7 @@ post_to_2.4.4() {
} }
post_to_2.4.5() { post_to_2.4.5() {
echo "Regenerating Elastic Agent Installers" echo "Nothing to apply"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.5 POSTVERSION=2.4.5
} }
@@ -446,6 +447,12 @@ post_to_2.4.20() {
POSTVERSION=2.4.20 POSTVERSION=2.4.20
} }
post_to_2.4.30() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.30
}
repo_sync() { repo_sync() {
echo "Sync the local repo." echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -460,7 +467,6 @@ stop_salt_master() {
echo "" echo ""
echo "Killing any queued Salt jobs on the manager." echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1 pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo "" echo ""
echo "Storing salt-master pid." echo "Storing salt-master pid."
@@ -468,6 +474,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID" echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master" systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option." timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
} }
stop_salt_minion() { stop_salt_minion() {
@@ -480,14 +487,12 @@ stop_salt_minion() {
echo "" echo ""
echo "Killing Salt jobs on this node." echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local salt-call saltutil.kill_all_jobs --local
set -e
echo "Storing salt-minion pid." echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1) MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID" echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion" systemctl_func "stop" "salt-minion"
set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e set -e
} }
@@ -506,7 +511,7 @@ up_to_2.4.4() {
} }
up_to_2.4.5() { up_to_2.4.5() {
determine_elastic_agent_upgrade echo "Nothing to do for 2.4.5"
INSTALLEDVERSION=2.4.5 INSTALLEDVERSION=2.4.5
} }
@@ -523,6 +528,13 @@ up_to_2.4.20() {
INSTALLEDVERSION=2.4.20 INSTALLEDVERSION=2.4.20
} }
up_to_2.4.30() {
determine_elastic_agent_upgrade
rm -f /opt/so/state/estemplates*.txt
INSTALLEDVERSION=2.4.30
}
determine_elastic_agent_upgrade() { determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap update_elastic_agent_airgap
@@ -568,7 +580,7 @@ update_airgap_rules() {
rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/ rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
} }
update_centos_repo() { update_airgap_repo() {
# Update the files in the repo # Update the files in the repo
echo "Syncing new updates to /nsm/repo" echo "Syncing new updates to /nsm/repo"
rsync -av $AGREPO/* /nsm/repo/ rsync -av $AGREPO/* /nsm/repo/
@@ -578,9 +590,9 @@ update_centos_repo() {
} }
update_salt_mine() { update_salt_mine() {
echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host." echo "Populating the mine with mine_functions for each host."
set +e set +e
salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"' salt \* mine.update -b 50
set -e set -e
} }
@@ -620,6 +632,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion." echo "You are already running the correct version of Salt for Security Onion."
else else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1 UPGRADESALT=1
fi fi
} }
@@ -628,22 +641,48 @@ upgrade_salt() {
SALTUPGRADED=True SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo "" echo ""
# If CentOS # If rhel family
if [[ $OS == 'centos' ]]; then if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt." echo "Removing yum versionlock for Salt."
echo "" echo ""
yum versionlock delete "salt-*" yum versionlock delete "salt-*"
echo "Updating Salt packages." echo "Updating Salt packages."
echo "" echo ""
set +e set +e
run_check_net_err \ # if oracle run with -r to ignore repos set by bootstrap
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ if [[ $OS == 'oracle' ]]; then
"Could not update salt, please check $SOUP_LOG for details." run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e set -e
echo "Applying yum versionlock for Salt." echo "Applying yum versionlock for Salt."
echo "" echo ""
yum versionlock add "salt-*" yum versionlock add "salt-*"
# Else do Ubuntu things # Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi fi
echo "Checking if Salt was upgraded." echo "Checking if Salt was upgraded."
@@ -655,7 +694,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again." echo "Once the issue is resolved, run soup again."
echo "Exiting." echo "Exiting."
echo "" echo ""
exit 0 exit 1
else else
echo "Salt upgrade success." echo "Salt upgrade success."
echo "" echo ""
@@ -736,14 +775,8 @@ main() {
echo "" echo ""
set_os set_os
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master." check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager." echo "Checking to see if this is a manager."
echo "" echo ""
@@ -789,11 +822,13 @@ main() {
set -e set -e
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
yum clean all update_airgap_repo
dnf clean all
check_os_updates check_os_updates
elif [[ $OS == 'oel' ]]; then elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap # sync remote repo down to local if not airgap
repo_sync repo_sync
dnf clean all
check_os_updates check_os_updates
fi fi
@@ -808,7 +843,8 @@ main() {
echo "Hotfix applied" echo "Hotfix applied"
update_version update_version
enable_highstate enable_highstate
salt-call state.highstate -l info queue=True (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
else else
echo "" echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -829,7 +865,7 @@ main() {
else else
update_registry update_registry
set +e set +e
update_docker_containers "soup" "" "" "$SOUP_LOG" update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e set -e
fi fi
@@ -844,6 +880,14 @@ main() {
echo "Upgrading Salt" echo "Upgrading Salt"
# Update the repo files so it can actually upgrade # Update the repo files so it can actually upgrade
upgrade_salt upgrade_salt
# for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi fi
preupgrade_changes preupgrade_changes
@@ -854,11 +898,6 @@ main() {
update_airgap_rules update_airgap_rules
fi fi
# Only update the repo if its airgap
if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
update_centos_repo
fi
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then if [[ ! "$MINIONID" =~ "_import" ]]; then
echo "" echo ""
@@ -881,7 +920,7 @@ main() {
# Testing that salt-master is up by checking that is it connected to itself # Testing that salt-master is up by checking that is it connected to itself
set +e set +e
echo "Waiting on the Salt Master service to be ready." echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details." check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e set -e
# update the salt-minion configs here and start the minion # update the salt-minion configs here and start the minion
@@ -906,7 +945,8 @@ main() {
echo "" echo ""
echo "Running a highstate. This could take several minutes." echo "Running a highstate. This could take several minutes."
set +e set +e
salt-call state.highstate -l info queue=True (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e set -e
stop_salt_master stop_salt_master
@@ -917,11 +957,12 @@ main() {
set +e set +e
echo "Waiting on the Salt Master service to be ready." echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details." check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
postupgrade_changes postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update [[ $is_airgap -eq 0 ]] && unmount_update

View File

@@ -0,0 +1,96 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# so-elastic-fleet-reset: last-resort teardown and re-setup of Elastic Fleet.
# Uninstalls every Elastic Agent on the grid, removes Fleet pillar data and
# enrollment tokens, deletes Fleet system indices and Fleet-ingested data
# streams, then reruns so-elastic-fleet-setup and re-installs the agent grid-wide.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
# Abort unless this node is a manager (helper sourced from so-common).
require_manager
# Inform user we are about to remove Elastic Fleet data
echo
echo "This script will remove the current Elastic Fleet install and all of its data and then rerun Elastic Fleet setup."
echo "This includes data previously ingested with Fleet such as Zeek and Suricata logs."
echo "Deployed Elastic Agents will no longer be enrolled and will need to be reinstalled."
echo "This script should only be used as a last resort to reinstall Elastic Fleet."
echo
echo "If you would like to proceed, type AGREE and hit ENTER."
echo
# Read user input
# ${INPUT^^} upper-cases the reply so "agree" in any case is accepted.
read INPUT
if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi
status "Uninstalling all Elastic Agents on all Grid Nodes..."
# Target every minion; queue=True serializes behind any running salt jobs.
salt \* cmd.run "elastic-agent uninstall -f" queue=True
status "Stopping Fleet Container..."
so-elastic-fleet-stop --force
status "Deleting Fleet Data from Pillars..."
# Remove the elasticfleet key from this manager's minion pillar, then strip
# the grid enrollment tokens from the global SOC pillar.
so-yaml.py remove /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls elasticfleet
sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
status "Deleting Elastic Fleet data..."
# Check to make sure that Elasticsearch is up & ready
RETURN_CODE=0
wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Elasticsearch not accessible, exiting script..."
exit 1
fi
# Fleet system aliases whose backing indices must be removed.
ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias
# NOTE(review): these 9200 calls use the kibana curl.config while the
# readiness check above uses the elasticsearch one — confirm the
# credentials are interchangeable.
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
# Delete all resolved indices
for INDX in ${INDXS}
do
status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done
done
status "Deleting Fleet-related Data Streams..."
# NOTE(review): bash concatenates these quoted pieces into ONE comma-joined
# string, so jq emits a single-element array ["logs-suricata-so,logs-kratos-so,..."].
# Confirm the Kibana delete_data_streams API accepts a comma-separated entry;
# otherwise each name should be a separate array element.
DATASTREAMS="logs-suricata-so","logs-kratos-so","logs-soc-so","logs-zeek-so"
JSON_STRING=$( jq -n \
--arg DATASTREAMLIST "$DATASTREAMS" \
'{"dataStreams":[$DATASTREAMLIST]}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/index_management/delete_data_streams" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
status "Restarting Kibana..."
so-kibana-restart --force
status "Checking to make sure that Kibana API is up & ready..."
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Kibana API not accessible, exiting script..."
exit 1
fi
status "Removing Integrations State File..."
# Deleting this state file forces integrations to be re-deployed on next setup.
rm -f /opt/so/state/eaintegrations.txt
status "Starting Elastic Fleet Setup..."
so-elastic-fleet-setup
status "Re-installing Elastic Agent on all Grid Nodes..."
salt \* state.apply elasticfleet.install_agent_grid queue=True
status "Elastic Fleet Reset complete...."

View File

@@ -147,7 +147,7 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
location ~ ^/auth/.*?(login) { location ~ ^/auth/.*?(login|oidc/callback/) {
rewrite /auth/(.*) /$1 break; rewrite /auth/(.*) /$1 break;
limit_req zone=auth_throttle burst={{ NGINXMERGED.config.throttle_login_burst }} nodelay; limit_req zone=auth_throttle burst={{ NGINXMERGED.config.throttle_login_burst }} nodelay;
limit_req_status 429; limit_req_status 429;

View File

@@ -41,7 +41,7 @@ pcap_sbin:
- file_mode: 755 - file_mode: 755
{% if PCAPBPF %} {% if PCAPBPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
{% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %} {% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %}
{% else %} {% else %}

View File

@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
import logging
from time import sleep
from os import remove
log = logging.getLogger(__name__)
def start(interval=30):
    """Minion-side Salt engine (removed in 2.4.30): every ``interval`` seconds,
    verify this minion's CA certificate is retrievable from the Salt mine and
    force the mine to be repopulated when it is not."""
    log.info("checkmine engine started")
    minionid = __grains__['id']
    while True:
        try:
            # Query the mine (via the saltutil runner) for this minion's CA cert.
            # ca_crt itself is unused; the lookup only proves the entry exists.
            ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt']
            log.info('Successfully queried Salt mine for the CA.')
        except:
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
            # a narrower KeyError/Exception would be safer — confirm intent.
            log.error('Could not pull CA from the Salt mine.')
            log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' % minionid)
            try:
                # Delete the master's cached mine file so it gets rebuilt.
                remove('/var/cache/salt/master/minions/%s/mine.p' % minionid)
                log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid)
            except FileNotFoundError:
                log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid)
            # Push the CA cert back into the mine.
            __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt')
            log.warning('Salt mine repopulated with /etc/pki/ca.crt')
        sleep(interval)

View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
import logging
from time import sleep
import os
import salt.client
log = logging.getLogger(__name__)
local = salt.client.LocalClient()
def start(interval=60):
    """Master-side Salt engine that audits and repairs minion mine caches.

    Every ``interval`` seconds, for each minion reported alive by the
    ``manage.alived`` runner:

    * flush and update the mine when its cache file (``mine.p``) is missing
      or corrupt (a corrupt file has been observed as exactly 1 byte),
    * for manager-class minions, verify the CA certificate stored in the
      mine exists and matches ``/etc/pki/ca.key``, repairing it otherwise,
    * verify the minion has an IP address recorded in the mine.

    :param interval: seconds to sleep between audit passes (default 60)
    """
    def mine_delete(minion, func):
        # Drop a single mine function so the next mine.update repopulates it.
        log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion))
        local.cmd(minion, 'mine.delete', [func])

    def mine_flush(minion):
        # Drop the minion's entire mine cache.
        log.warning('checkmine engine: flushing mine cache for %s' % minion)
        local.cmd(minion, 'mine.flush')

    def mine_update(minion):
        # Ask the minion to repopulate its mine data.
        log.warning('checkmine engine: updating mine cache for %s' % minion)
        local.cmd(minion, 'mine.update')

    log.info("checkmine engine: started")
    cachedir = __opts__['cachedir']
    while True:
        log.debug('checkmine engine: checking which minions are alive')
        manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False)
        log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived))
        for minion in manage_alived:
            mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p')
            # it is possible that a minion is alive, but hasn't created a mine file yet
            try:
                mine_size = os.path.getsize(mine_path)
                log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size))
                # For some reason the mine file can be corrupt and only be 1 byte in size
                if mine_size == 1:
                    log.error('checkmine engine: found %s to be 1 byte' % mine_path)
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            except FileNotFoundError:
                log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path))
                mine_flush(minion)
                mine_update(minion)
                continue
            # if a manager, check that the ca is in the mine and that it is correct
            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
                if minion not in x509:
                    log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                    mine_flush(minion)
                    mine_update(minion)
                    continue
                # BUG FIX: a missing dict key raises KeyError, not IndexError, so the
                # original "except IndexError" branch for a missing ca.crt entry was
                # unreachable and such minions fell into the flush (KeyError) branch.
                # Explicit membership checks make both repair paths reachable.
                if '/etc/pki/ca.crt' not in x509[minion]:
                    log.error('checkmine engine: found minion %s does\'t have a ca_crt in the mine' % (minion))
                    mine_delete(minion, 'x509.get_pem_entries')
                    mine_update(minion)
                    continue
                ca_crt = x509[minion]['/etc/pki/ca.crt']
                log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
                # since the cert is defined, make sure it matches the CA private key
                import salt.modules.x509_v2 as x509_v2
                if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
                    log.error('checkmine engine: found minion %s does\'t have a valid ca_crt in the mine' % (minion))
                    log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
                    mine_delete(minion, 'x509.get_pem_entries')
                    mine_update(minion)
                    continue
                log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
            # NOTE(review): despite the original comment, no comparison against the
            # manage.alived IP is performed (show_ip=False above); this only checks
            # that SOME IP is recorded in the mine for the minion.
            network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
            try:
                mine_ip = network_ip_addrs[minion][0]
                log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip))
            except IndexError:
                # minion is present in the mine but returned an empty address list
                log.error('checkmine engine: found minion %s does\'t have a mine_ip' % (minion))
                mine_delete(minion, 'network.ip_addrs')
                mine_update(minion)
            except KeyError:
                # minion has no entry in the mine at all
                log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                mine_flush(minion)
                mine_update(minion)
        sleep(interval)

View File

@@ -1,4 +1,8 @@
mine_interval: 35 mine_interval: 25
mine_functions: mine_functions:
network.ip_addrs: network.ip_addrs:
- interface: {{ GLOBALS.main_interface }} - interface: {{ pillar.host.mainint }}
{% if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] -%}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif -%}

View File

@@ -3,4 +3,4 @@ engines_dirs:
engines: engines:
- checkmine: - checkmine:
interval: 30 interval: 60

View File

@@ -5,25 +5,21 @@
{% set SPLITCHAR = '+' %} {% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %} {% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %} {% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %} {% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %} {% else %}
{% set SPLITCHAR = '-' %} {% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %} {% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %} {% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %} {% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %} {% endif %}
{% set INSTALLEDSALTVERSION = grains.saltversion %} {% set INSTALLEDSALTVERSION = grains.saltversion %}
{% if grains.saltversion|string != SALTVERSION|string %} {% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %} {% if grains.os_family|lower == 'redhat' %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os_family|lower == 'debian' %} {% elif grains.os_family|lower == 'debian' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% endif %} {% endif %}
{% else %} {% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt: salt:
master: master:
version: 3006.1 version: 3006.3

View File

@@ -12,25 +12,34 @@ hold_salt_master_package:
- name: salt-master - name: salt-master
{% endif %} {% endif %}
salt_master_service: # prior to 2.4.30 this engine ran on the manager with salt-minion
service.running: # this has changed to running with the salt-master in 2.4.30
- name: salt-master remove_engines_config:
- enable: True file.absent:
- name: /etc/salt/minion.d/engines.conf
- source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service
checkmine_engine: checkmine_engine:
file.managed: file.managed:
- name: /etc/salt/engines/checkmine.py - name: /etc/salt/engines/checkmine.py
- source: salt://salt/engines/checkmine.py - source: salt://salt/engines/master/checkmine.py
- makedirs: True - makedirs: True
- watch_in:
- service: salt_minion_service
engines_config: engines_config:
file.managed: file.managed:
- name: /etc/salt/minion.d/engines.conf - name: /etc/salt/master.d/engines.conf
- source: salt://salt/files/engines.conf - source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service salt_master_service:
service.running:
- name: salt-master
- enable: True
- watch:
- file: checkmine_engine
- file: engines_config
- order: last
{% else %} {% else %}
@@ -38,4 +47,4 @@ engines_config:
test.fail_without_changes: test.fail_without_changes:
- name: {{sls}}_state_not_allowed - name: {{sls}}_state_not_allowed
{% endif %} {% endif %}

View File

@@ -0,0 +1,13 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# this state was separated from salt.minion state since it is called during setup
# GLOBALS are imported in the salt.minion state and that is not available at that point in setup
# this state is included in the salt.minion state
mine_functions:
file.managed:
- name: /etc/salt/minion.d/mine_functions.conf
- source: salt://salt/etc/minion.d/mine_functions.conf.jinja
- template: jinja

View File

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt: salt:
minion: minion:
version: 3006.1 version: 3006.3
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds. service_start_delay: 30 # in seconds.

View File

@@ -12,6 +12,7 @@ include:
- salt - salt
- systemd.reload - systemd.reload
- repo.client - repo.client
- salt.mine_functions
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %} {% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -66,6 +67,9 @@ set_log_levels:
- "log_level: info" - "log_level: info"
- "log_level_logfile: info" - "log_level_logfile: info"
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a sleep timer on service start
# it is not required to restart the service
salt_minion_service_unit_file: salt_minion_service_unit_file:
file.managed: file.managed:
- name: {{ SYSTEMD_UNIT_FILE }} - name: {{ SYSTEMD_UNIT_FILE }}
@@ -78,14 +82,6 @@ salt_minion_service_unit_file:
{% endif %} {% endif %}
mine_functions:
file.managed:
- name: /etc/salt/minion.d/mine_functions.conf
- source: salt://salt/etc/minion.d/mine_functions.conf.jinja
- template: jinja
- defaults:
GLOBALS: {{ GLOBALS }}
# this has to be outside the if statement above since there are <requisite>_in calls to this state # this has to be outside the if statement above since there are <requisite>_in calls to this state
salt_minion_service: salt_minion_service:
service.running: service.running:
@@ -96,6 +92,5 @@ salt_minion_service:
- file: mine_functions - file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels - file: set_log_levels
- file: salt_minion_service_unit_file
{% endif %} {% endif %}
- order: last - order: last

File diff suppressed because it is too large Load Diff

View File

@@ -3,23 +3,6 @@
COMMAND=$1 COMMAND=$1
SENSORONI_CONTAINER=${SENSORONI_CONTAINER:-so-sensoroni} SENSORONI_CONTAINER=${SENSORONI_CONTAINER:-so-sensoroni}
function ci() {
HOME_DIR=$(dirname "$0")
TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
}
function download() { function download() {
ANALYZERS=$1 ANALYZERS=$1
if [[ $ANALYZERS = "all" ]]; then if [[ $ANALYZERS = "all" ]]; then
@@ -36,5 +19,5 @@ function download() {
if [[ "$COMMAND" == "download" ]]; then if [[ "$COMMAND" == "download" ]]; then
download "$2" download "$2"
else else
ci ../../../../pyci.sh $@
fi fi

View File

@@ -0,0 +1,10 @@
# Malware Hash Registry
## Description
Search Team Cymru's Malware Hash Registry for a file hash.
## Configuration Requirements
None.
**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1.

View File

@@ -3,5 +3,5 @@ post_setup_cron:
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate' - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
- identifier: post_setup_cron - identifier: post_setup_cron
- user: root - user: root
- minute: '*/1' - minute: '*/5'
- identifier: post_setup_cron - identifier: post_setup_cron

View File

@@ -13,11 +13,13 @@
{% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %} {% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %}
{% endfor %} {% endfor %}
{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #} {# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
{% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %} {% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
{% for m in minions.keys() %} {% if node_type in ['heavynode'] %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %} {% for m in minions.keys() %}
{% endfor %} {% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
{% endfor %}
{% endif %}
{% endfor %} {% endfor %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %} {% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}

View File

@@ -59,6 +59,12 @@ soc:
target: _blank target: _blank
links: links:
- 'https://www.virustotal.com/gui/search/{value}' - 'https://www.virustotal.com/gui/search/{value}'
- name: Sublime Platform Email Review
description: Review email in Sublime Platform
icon: fa-external-link-alt
target: _blank
links:
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
eventFields: eventFields:
default: default:
- soc_timestamp - soc_timestamp

View File

@@ -67,10 +67,10 @@ function manage_minion() {
response=$(so-minion "-o=$op" "-m=$minion_id") response=$(so-minion "-o=$op" "-m=$minion_id")
exit_code=$? exit_code=$?
if [[ exit_code -eq 0 ]]; then if [[ exit_code -eq 0 ]]; then
log "Successful command execution" log "Successful '$op' command execution on $minion_id"
respond "$id" "true" respond "$id" "true"
else else
log "Unsuccessful command execution: $response ($exit_code)" log "Unsuccessful '$op' command execution on $minion_id: $response ($exit_code)"
respond "$id" "false" respond "$id" "false"
fi fi
} }

View File

@@ -52,6 +52,8 @@ so-soctopus:
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch:
- file: /opt/so/conf/soctopus/SOCtopus.conf
- require: - require:
- file: soctopusconf - file: soctopusconf
- file: navigatordefaultlayer - file: navigatordefaultlayer

View File

@@ -129,7 +129,7 @@ surithresholding:
# BPF compilation and configuration # BPF compilation and configuration
{% if SURICATABPF %} {% if SURICATABPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %} {% set BPF_STATUS = 1 %}
{% else %} {% else %}

View File

@@ -280,7 +280,7 @@ suricata:
mqtt: mqtt:
enabled: 'no' enabled: 'no'
http2: http2:
enabled: 'no' enabled: 'yes'
asn1-max-frames: 256 asn1-max-frames: 256
run-as: run-as:
user: suricata user: suricata

View File

@@ -152,7 +152,7 @@ plcronscript:
# BPF compilation and configuration # BPF compilation and configuration
{% if ZEEKBPF %} {% if ZEEKBPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %} {% set BPF_STATUS = 1 %}
{% else %} {% else %}

View File

@@ -49,12 +49,13 @@ zeek:
- frameworks/files/hash-all-files - frameworks/files/hash-all-files
- frameworks/files/detect-MHR - frameworks/files/detect-MHR
- policy/frameworks/notice/extend-email/hostnames - policy/frameworks/notice/extend-email/hostnames
- policy/frameworks/notice/community-id
- policy/protocols/conn/community-id-logging
- ja3 - ja3
- hassh - hassh
- intel - intel
- cve-2020-0601 - cve-2020-0601
- securityonion/bpfconf - securityonion/bpfconf
- securityonion/communityid
- securityonion/file-extraction - securityonion/file-extraction
- oui-logging - oui-logging
- icsnpp-modbus - icsnpp-modbus
@@ -75,7 +76,7 @@ zeek:
- LogAscii::use_json = T; - LogAscii::use_json = T;
- CaptureLoss::watch_interval = 5 mins; - CaptureLoss::watch_interval = 5 mins;
networks: networks:
HOME_NET: HOME_NET:
- 192.168.0.0/16 - 192.168.0.0/16
- 10.0.0.0/8 - 10.0.0.0/8
- 172.16.0.0/12 - 172.16.0.0/12
@@ -120,4 +121,4 @@ zeek:
- stats - stats
- stderr - stderr
- stdout - stdout

View File

@@ -268,15 +268,6 @@ collect_dockernet() {
fi fi
} }
collect_es_space_limit() {
whiptail_log_size_limit "$log_size_limit"
while ! valid_int "$log_size_limit"; do # Upper/lower bounds?
whiptail_invalid_input
whiptail_log_size_limit "$log_size_limit"
done
}
collect_gateway() { collect_gateway() {
whiptail_management_interface_gateway whiptail_management_interface_gateway
@@ -286,28 +277,6 @@ collect_gateway() {
done done
} }
collect_homenet_mngr() {
whiptail_homenet_manager "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNMANAGER"; do
whiptail_invalid_input
whiptail_homenet_manager "$HNMANAGER"
done
}
collect_homenet_snsr() {
if whiptail_homenet_sensor_inherit; then
export HNSENSOR=inherit
else
whiptail_homenet_sensor "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNSENSOR"; do
whiptail_invalid_input
whiptail_homenet_sensor "$HNSENSOR"
done
fi
}
collect_hostname() { collect_hostname() {
collect_hostname_validate collect_hostname_validate
@@ -346,26 +315,6 @@ collect_idh_preferences() {
if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi
} }
collect_idh_services() {
whiptail_idh_services
case "$IDH_SERVICES" in
'Linux Webserver (NAS Skin)')
IDH_SERVICES='"HTTP","FTP","SSH"'
;;
'MySQL Server')
IDH_SERVICES='"MYSQL","SSH"'
;;
'MSSQL Server')
IDH_SERVICES='"MSSQL","VNC'
;;
'Custom')
whiptail_idh_services_custom
IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' )
;;
esac
}
collect_int_ip_mask() { collect_int_ip_mask() {
whiptail_management_interface_ip_mask whiptail_management_interface_ip_mask
@@ -425,71 +374,6 @@ collect_net_method() {
fi fi
} }
collect_ntp_servers() {
if whiptail_ntp_ask; then
[[ $is_airgap ]] && ntp_string=""
whiptail_ntp_servers "$ntp_string"
while ! valid_ntp_list "$ntp_string"; do
whiptail_invalid_input
whiptail_ntp_servers "$ntp_string"
done
IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
else
ntp_servers=()
fi
}
collect_oinkcode() {
whiptail_oinkcode
while ! valid_string "$OINKCODE" "" "128"; do
whiptail_invalid_input
whiptail_oinkcode "$OINKCODE"
done
}
collect_patch_schedule() {
whiptail_patch_schedule
case "$patch_schedule" in
'New Schedule')
whiptail_patch_schedule_select_days
whiptail_patch_schedule_select_hours
collect_patch_schedule_name_new
patch_schedule_os_new
;;
'Import Schedule')
collect_patch_schedule_name_import
;;
'Automatic')
PATCHSCHEDULENAME='auto'
;;
'Manual')
PATCHSCHEDULENAME='manual'
;;
esac
}
collect_patch_schedule_name_new() {
whiptail_patch_name_new_schedule
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_name_new_schedule "$PATCHSCHEDULENAME"
done
}
collect_patch_schedule_name_import() {
whiptail_patch_schedule_import
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_schedule_import "$PATCHSCHEDULENAME"
done
}
collect_proxy() { collect_proxy() {
[[ -n $TESTING ]] && return [[ -n $TESTING ]] && return
local ask=${1:-true} local ask=${1:-true}
@@ -649,8 +533,8 @@ configure_minion() {
"log_level_logfile: info"\ "log_level_logfile: info"\
"log_file: /opt/so/log/salt/minion" >> "$minion_config" "log_file: /opt/so/log/salt/minion" >> "$minion_config"
cp -f ../salt/salt/etc/minion.d/mine_functions.conf.jinja /etc/salt/minion.d/mine_functions.conf info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "$MNIC"}}'"
sed -i "s/{{ GLOBALS.main_interface }}/$MNIC/" /etc/salt/minion.d/mine_functions.conf salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="{'host': {'mainint': $MNIC}}"
{ {
logCmd "systemctl enable salt-minion"; logCmd "systemctl enable salt-minion";
@@ -658,47 +542,6 @@ configure_minion() {
} >> "$setup_log" 2>&1 } >> "$setup_log" 2>&1
} }
configure_ntp() {
local chrony_conf=/etc/chrony.conf
# Install chrony if it isn't already installed
if ! command -v chronyc &> /dev/null; then
logCmd "dnf -y install chrony"
fi
[[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
printf '%s\n' "# NTP server list" > $chrony_conf
# Build list of servers
for addr in "${ntp_servers[@]}"; do
echo "server $addr iburst" >> $chrony_conf
done
printf '\n%s\n' "# Config options" >> $chrony_conf
printf '%s\n' \
'driftfile /var/lib/chrony/drift' \
'makestep 1.0 3' \
'rtcsync' \
'logdir /var/log/chrony' >> $chrony_conf
if [[ $is_rpm ]]; then
systemctl enable chronyd
systemctl restart chronyd
else
systemctl enable chrony
systemctl restart chrony
fi
# Tell the chrony daemon to sync time & update the system time
# Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
printf "Syncing chrony time to server: "
chronyc -a 'burst 4/4' && sleep 30
printf "Forcing chrony to update the time: "
chronyc -a makestep && sleep 30
}
checkin_at_boot() { checkin_at_boot() {
local minion_config=/etc/salt/minion local minion_config=/etc/salt/minion
@@ -719,7 +562,7 @@ check_requirements() {
req_cores=4 req_cores=4
req_nics=2 req_nics=2
elif [[ $is_standalone ]]; then elif [[ $is_standalone ]]; then
req_mem=24 req_mem=16
req_cores=4 req_cores=4
req_nics=2 req_nics=2
elif [[ $is_manager ]]; then elif [[ $is_manager ]]; then
@@ -743,7 +586,7 @@ check_requirements() {
req_cores=4 req_cores=4
req_nics=1 req_nics=1
elif [[ $is_heavynode ]]; then elif [[ $is_heavynode ]]; then
req_mem=24 req_mem=16
req_cores=4 req_cores=4
req_nics=2 req_nics=2
elif [[ $is_idh ]]; then elif [[ $is_idh ]]; then
@@ -808,6 +651,17 @@ check_requirements() {
if [[ $total_mem_hr -lt $req_mem ]]; then if [[ $total_mem_hr -lt $req_mem ]]; then
whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB"
if [[ $is_standalone || $is_heavynode ]]; then
echo "This install type will fail with less than $req_mem GB of memory. Exiting setup."
exit 0
fi
fi
if [[ $is_standalone || $is_heavynode ]]; then
if [[ $total_mem_hr -gt 15 && $total_mem_hr -lt 24 ]]; then
low_mem=true
else
low_mem=false
fi
fi fi
} }
@@ -1055,16 +909,6 @@ download_elastic_agent_artifacts() {
fi fi
} }
installer_progress_loop() {
local i=0
local msg="${1:-Performing background actions...}"
while true; do
[[ $i -lt 98 ]] && ((i++))
set_progress_str "$i" "$msg" nolog
[[ $i -gt 0 ]] && sleep 5s
done
}
installer_prereq_packages() { installer_prereq_packages() {
if [[ $is_deb ]]; then if [[ $is_deb ]]; then
# Print message to stdout so the user knows setup is doing something # Print message to stdout so the user knows setup is doing something
@@ -1123,9 +967,7 @@ docker_seed_registry() {
if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then
if [ "$install_type" == 'IMPORT' ]; then if [ "$install_type" == 'IMPORT' ]; then
container_list 'so-import' container_list 'so-import'
elif [ "$install_type" == 'HELIXSENSOR' ]; then
container_list 'so-helix'
else else
container_list container_list
fi fi
@@ -1258,7 +1100,7 @@ generate_ssl() {
# if the install type is a manager then we need to wait for the minion to be ready before trying # if the install type is a manager then we need to wait for the minion to be ready before trying
# to run the ssl state since we need the minion to sign the certs # to run the ssl state since we need the minion to sign the certs
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion (wait_for_salt_minion "$MINION_ID" "5" '/dev/stdout' || fail_setup) 2>&1 | tee -a "$setup_log"
fi fi
info "Applying SSL state" info "Applying SSL state"
logCmd "salt-call state.apply ssl -l info" logCmd "salt-call state.apply ssl -l info"
@@ -1384,7 +1226,7 @@ ls_heapsize() {
fi fi
case "$install_type" in case "$install_type" in
'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE') 'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
LS_HEAP_SIZE='1000m' LS_HEAP_SIZE='1000m'
;; ;;
'EVAL') 'EVAL')
@@ -1648,21 +1490,6 @@ network_setup() {
logCmd "sed -i '/\$MNIC/${INTERFACE}/g' /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable" logCmd "sed -i '/\$MNIC/${INTERFACE}/g' /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable"
} }
ntp_pillar_entries() {
local pillar_file=$local_salt_dir/pillar/minions/$MINION_ID.sls
if [[ ${#ntp_servers[@]} -gt 0 ]]; then
printf '%s\n'\
"ntp:"\
" servers:" > "$pillar_file"
for addr in "${ntp_servers[@]}"; do
printf '%s\n' " - '$addr'" >> "$pillar_file"
done
fi
}
parse_install_username() { parse_install_username() {
# parse out the install username so things copy correctly # parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}} INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -1881,7 +1708,11 @@ drop_install_options() {
echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt
NODETYPE=${install_type^^} NODETYPE=${install_type^^}
echo "NODETYPE=$NODETYPE" >> /opt/so/install.txt echo "NODETYPE=$NODETYPE" >> /opt/so/install.txt
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt if [[ $low_mem == "true" ]]; then
echo "CORECOUNT=1" >> /opt/so/install.txt
else
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt
fi
echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt
echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt
echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt
@@ -1972,6 +1803,7 @@ securityonion_repo() {
} }
repo_sync_local() { repo_sync_local() {
SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
info "Repo Sync" info "Repo Sync"
if [[ $is_supported ]]; then if [[ $is_supported ]]; then
# Sync the repo from the the SO repo locally. # Sync the repo from the the SO repo locally.
@@ -2021,7 +1853,7 @@ repo_sync_local() {
curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo
rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo
dnf repolist dnf repolist
curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install
else else
@@ -2111,11 +1943,6 @@ saltify() {
} }
# Run a salt command to generate the minion key
salt_firstcheckin() {
salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput
}
salt_install_module_deps() { salt_install_module_deps() {
logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
@@ -2498,10 +2325,6 @@ wait_for_file() {
return 1 return 1
} }
wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
}
verify_setup() { verify_setup() {
info "Verifying setup" info "Verifying setup"
set -o pipefail set -o pipefail

View File

@@ -91,7 +91,7 @@ fi
# if packages are updated and the box isn't rebooted # if packages are updated and the box isn't rebooted
if [[ $is_debian ]]; then if [[ $is_debian ]]; then
update_packages update_packages
if [[ -f "/var/run/reboot-required" ]]; then if [[ -f "/var/run/reboot-required" ]] && [ -z "$test_profile" ]; then
whiptail_debian_reboot_required whiptail_debian_reboot_required
reboot reboot
fi fi
@@ -676,7 +676,11 @@ if ! [[ -f $install_opt_file ]]; then
export MAINIP=$MAINIP export MAINIP=$MAINIP
export PATCHSCHEDULENAME=$PATCHSCHEDULENAME export PATCHSCHEDULENAME=$PATCHSCHEDULENAME
export INTERFACE=$INTERFACE export INTERFACE=$INTERFACE
export CORECOUNT=$lb_procs if [[ $low_mem == "true" ]]; then
export CORECOUNT=1
else
export CORECOUNT=$lb_procs
fi
export LSHOSTNAME=$HOSTNAME export LSHOSTNAME=$HOSTNAME
export LSHEAP=$LS_HEAP_SIZE export LSHEAP=$LS_HEAP_SIZE
export CPUCORES=$num_cpu_cores export CPUCORES=$num_cpu_cores
@@ -714,6 +718,17 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common.packages"
logCmd "salt-call state.apply common" logCmd "salt-call state.apply common"
# this will apply the salt.minion state first since salt.master includes salt.minion
logCmd "salt-call state.apply salt.master"
# wait here until we get a response from the salt-master since it may have just restarted
# exit setup after 5-6 minutes of trying
check_salt_master_status || fail "Can't access salt master or it is not ready"
# apply the ca state to create the ca and put it in the mine early in the install
# the minion ip will already be in the mine from configure_minion function in so-functions
generate_ca
# this will also call the ssl state since docker requires the intca
# the salt-minion service will need to be up on the manager to sign requests
generate_ssl
logCmd "salt-call state.apply docker" logCmd "salt-call state.apply docker"
firewall_generate_templates firewall_generate_templates
set_initial_firewall_policy set_initial_firewall_policy
@@ -721,8 +736,6 @@ if ! [[ -f $install_opt_file ]]; then
title "Downloading Elastic Agent Artifacts" title "Downloading Elastic Agent Artifacts"
download_elastic_agent_artifacts download_elastic_agent_artifacts
generate_ca
generate_ssl
logCmd "salt-call state.apply -l info firewall" logCmd "salt-call state.apply -l info firewall"
# create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
@@ -759,8 +772,11 @@ if ! [[ -f $install_opt_file ]]; then
info "Restarting SOC to pick up initial user" info "Restarting SOC to pick up initial user"
logCmd "so-soc-restart" logCmd "so-soc-restart"
title "Setting up Elastic Fleet" title "Setting up Elastic Fleet"
logCmd "salt-call state.apply elasticfleet.config" logCmd "salt-call state.apply elasticfleet.config"
logCmd "so-elastic-fleet-setup" if ! logCmd so-elastic-fleet-setup; then
error "Failed to run so-elastic-fleet-setup"
fail_setup
fi
if [[ ! $is_import ]]; then if [[ ! $is_import ]]; then
title "Setting up Playbook" title "Setting up Playbook"
logCmd "so-playbook-reset" logCmd "so-playbook-reset"
@@ -768,8 +784,6 @@ if ! [[ -f $install_opt_file ]]; then
checkin_at_boot checkin_at_boot
set_initial_firewall_access set_initial_firewall_access
logCmd "salt-call schedule.enable -linfo --local" logCmd "salt-call schedule.enable -linfo --local"
systemctl restart salt-master
systemctl restart salt-minion
verify_setup verify_setup
else else
touch /root/accept_changes touch /root/accept_changes

View File

@@ -25,7 +25,8 @@ log_has_errors() {
# Ignore salt mast cached public key and minion failed to auth because this is a test # Ignore salt mast cached public key and minion failed to auth because this is a test
# to see if the salt key had already been accepted. # to see if the salt key had already been accepted.
# Ignore failed to connect to ::1 since we have most curls wrapped in a retry. # Ignore failed to connect to since we have most curls wrapped in a retry and there are
# multiple mirrors available.
# Ignore perl-Error- since that is the name of a Perl package SO installs. # Ignore perl-Error- since that is the name of a Perl package SO installs.
@@ -35,11 +36,13 @@ log_has_errors() {
# This is ignored for Ubuntu # This is ignored for Ubuntu
# Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target # Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target
# may be requested by dependency only (it is configured to refuse manual start/stop). # may be requested by dependency only (it is configured to refuse manual start/stop).
# Command failed with exit code is output during retry loops.
grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \ grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \
grep -vE "The Salt Master has cached the public key for this node" | \ grep -vE "The Salt Master has cached the public key for this node" | \
grep -vE "Minion failed to authenticate with the master" | \ grep -vE "Minion failed to authenticate with the master" | \
grep -vE "Failed to connect to ::1" | \ grep -vE "Failed to connect to " | \
grep -vE "Failed to set locale" | \ grep -vE "Failed to set locale" | \
grep -vE "perl-Error-" | \ grep -vE "perl-Error-" | \
grep -vE "Failed:\s*?[0-9]+" | \ grep -vE "Failed:\s*?[0-9]+" | \
@@ -54,11 +57,15 @@ log_has_errors() {
grep -vE "Login Failed Details" | \ grep -vE "Login Failed Details" | \
grep -vE "response from daemon: unauthorized" | \ grep -vE "response from daemon: unauthorized" | \
grep -vE "Reading first line of patchfile" | \ grep -vE "Reading first line of patchfile" | \
grep -vE "Command failed with exit code" | \
grep -vE "Running scope as unit" &> "$error_log" grep -vE "Running scope as unit" &> "$error_log"
if [[ $? -eq 0 ]]; then if [[ $? -eq 0 ]]; then
# This function succeeds (returns 0) if errors are detected
return 0 return 0
fi fi
# No errors found, return 1 (function failed to find errors)
return 1 return 1
} }
@@ -117,7 +124,10 @@ main() {
echo "WARNING: Failed setup a while ago" echo "WARNING: Failed setup a while ago"
exit_code=1 exit_code=1
elif log_has_errors; then elif log_has_errors; then
echo "WARNING: Errors detected during setup" echo "WARNING: Errors detected during setup."
echo "--------- ERRORS ---------"
cat $error_log
echo "--------------------------"
exit_code=1 exit_code=1
touch /root/failure touch /root/failure
elif using_iso && cron_error_in_mail_spool; then elif using_iso && cron_error_in_mail_spool; then

Binary file not shown.