Merge remote-tracking branch 'origin/2.4/dev' into upgrade/salt3006.3

m0duspwnens
2023-10-19 09:15:31 -04:00
25 changed files with 293 additions and 86 deletions


@@ -50,6 +50,12 @@ pki_public_ca_crt:
        attempts: 5
        interval: 30

+mine_update_ca_crt:
+  module.run:
+    - mine.update: []
+    - onchanges:
+      - x509: pki_public_ca_crt

cakeyperms:
  file.managed:
    - replace: False


@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.8.2"
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
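Soup consumes these variables roughly as follows; this is an illustrative sketch, not code from so-common, and it assumes the .md5 file contains just the bare hash string:

# Sketch only: download the agent tarball and verify it against the published MD5.
curl -fsSL -o "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_URL"
expected_md5=$(curl -fsSL "$ELASTIC_AGENT_MD5_URL")
actual_md5=$(md5sum "$ELASTIC_AGENT_FILE" | awk '{print $1}')
if [ "$expected_md5" != "$actual_md5" ]; then
  echo "MD5 mismatch for $ELASTIC_AGENT_FILE" >&2
  exit 1
fi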
@@ -133,15 +133,27 @@ check_elastic_license() {
}

check_salt_master_status() {
-  local timeout=$1
-  echo "Checking if we can talk to the salt master"
-  salt-call state.show_top concurrent=true
-  return
+  local count=0
+  local attempts="${1:-10}"
+
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Checking if we can access the salt master and that it is ready at: ${current_time}"
+  while ! salt-call state.show_top -l error concurrent=true 1> /dev/null; do
+    current_time="$(date '+%b %d %H:%M:%S')"
+    echo "Can't access salt master or it is not ready at: ${current_time}"
+    ((count+=1))
+    if [[ $count -eq $attempts ]]; then
+      # 10 attempts takes about 5.5 minutes
+      echo "Gave up trying to access salt-master"
+      return 1
+    fi
+  done
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Successfully accessed the salt master and it is ready at: ${current_time}"
+  return 0
}
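A minimal caller sketch (hypothetical, not from soup): with the default of 10 attempts and roughly 30 seconds of salt-call timeout per try, a full failure takes about 5.5 minutes, so callers that want to fail faster can pass a smaller attempt count:

# Hypothetical caller: allow only 3 attempts before giving up.
if ! check_salt_master_status 3; then
  echo "salt-master unreachable; aborting." >&2
  exit 1
fi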
check_salt_minion_status() {
-  local timeout=$1
+  local timeout="${1:-5}"
  echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
  salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
  local status=$?


@@ -137,7 +137,7 @@ update_docker_containers() {
for i in "${TRUSTED_CONTAINERS[@]}"
do
if [ -z "$PROGRESS_CALLBACK" ]; then
echo "Downloading $i" >> "$LOG_FILE" 2>&1
echo "Downloading $i" >> "$LOG_FILE" 2>&1
else
$PROGRESS_CALLBACK $i
fi


@@ -136,6 +136,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
fi
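The variable is built up as an alternation pattern so the filtering step elsewhere in the script can drop all excluded lines in one pass; a sketch of that usage, assuming GNU grep (the exact pipeline in this script may differ):

# Sketch: strip excluded false positives from candidate error lines.
grep -i error /opt/so/log/*/*.log | grep -viE "$EXCLUDED_ERRORS"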
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -230,4 +231,4 @@ else
echo -e "\nResult: One or more errors found"
fi
exit $RESULT
exit $RESULT


@@ -6,6 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

+# include ssl since docker service requires the intca
+include:
+  - ssl

dockergroup:
  group.present:
@@ -86,6 +89,11 @@ docker_running:
    - enable: True
    - watch:
      - file: docker_daemon
+      - x509: trusttheca
+    - require:
+      - file: docker_daemon
+      - x509: trusttheca

# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
# 57314 = Strelka, 47760-47860 = Zeek
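The reservation itself is a kernel setting; a hedged, manual equivalent of what the state presumably renders (ip_local_reserved_ports accepts comma-separated values and ranges):

# Manual equivalent of the port reservation described above.
sysctl -w net.ipv4.ip_local_reserved_ports=57314,47760-47860
# Confirm the setting took effect:
cat /proc/sys/net/ipv4/ip_local_reserved_ports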


@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "8.8.0"
"version": "8.10.2"
},
"enabled": true,
"policy_id": "endpoints-initial",

salt/elasticfleet/tools/sbin/so-elastic-fleet-common Executable file → Normal file

@@ -42,6 +42,23 @@ elastic_fleet_integration_create() {
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}

+elastic_fleet_integration_remove() {
+  AGENT_POLICY=$1
+  NAME=$2
+
+  INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$AGENT_POLICY" | jq -r '.item.package_policies[] | select(.name=="'"$NAME"'") | .id')
+
+  JSON_STRING=$(jq -n \
+    --arg INTEGRATIONID "$INTEGRATION_ID" \
+    '{"packagePolicyIds":[$INTEGRATIONID]}'
+  )
+
+  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
+}
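Usage mirrors the soup hotfix later in this commit: look up the integration by name within an agent policy, then POST its id to the bulk-delete endpoint. For example:

# Remove the legacy Elastic Defend integration from the endpoints-initial policy.
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints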
elastic_fleet_integration_update() {
  UPDATE_ID=$1

@@ -98,3 +115,4 @@ elastic_fleet_policy_update() {
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}


@@ -0,0 +1,27 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Usage: Run with --force to update the Elastic Defend integration policy

. /usr/sbin/so-elastic-fleet-common

# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
  printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
  elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
  if [ -n "$INTEGRATION_ID" ]; then
    if [ "$1" = "--force" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
    else
      printf "\n\nIntegration $NAME exists - Not updating - rerun with --force to force the update.\n"
    fi
  else
    printf "\n\nIntegration does not exist - Creating integration\n"
    elastic_fleet_integration_create "@$INTEGRATION"
  fi
done
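Invocation sketch, following the usage comment at the top of the script:

# Create any missing Elastic Defend integrations, leaving existing ones alone:
so-elastic-fleet-integration-policy-elastic-defend
# Overwrite deployed integrations with the packaged JSON definitions:
so-elastic-fleet-integration-policy-elastic-defend --force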


@@ -12,6 +12,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    # First, check for any package upgrades
    /usr/sbin/so-elastic-fleet-package-upgrade

+    # Second, configure the Elastic Defend integration separately
+    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend

    # Initial Endpoints
    for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
    do

@@ -65,3 +68,4 @@ else
    exit $RETURN_CODE
fi


@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}


@@ -63,7 +63,7 @@ update() {
  IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
  for i in "${LINES[@]}"; do
-    RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
+    RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
    echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
  done


@@ -578,7 +578,7 @@ update_centos_repo() {
}

update_salt_mine() {
-  echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+  echo "Populating the mine with mine_functions for each host."
  set +e
  salt \* mine.update -b 50
  set -e
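The -b 50 flag runs the update in batches of 50 minions so a large grid does not refresh the mine all at once. To spot-check the result afterwards (target names are illustrative):

# Every minion should report its management IP via the mine.
salt-run mine.get '*' network.ip_addrs
# Managers should also publish their CA cert entries.
salt-run mine.get 'example_standalone' x509.get_pem_entries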
@@ -691,13 +691,16 @@ verify_latest_update_script() {
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
  # if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
  #   fix_wazuh
+  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
+    salt-call state.apply elasticfleet -l info queue=True
+    . /usr/sbin/so-elastic-fleet-common
+    elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
+    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
  # elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
  #   2_3_10_hotfix_1
-  # else
+  else
    echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
-  # fi
+  fi
}
@@ -733,14 +736,8 @@ main() {
echo ""
set_os
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master."
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager."
echo ""
@@ -826,7 +823,7 @@ main() {
  else
    update_registry
    set +e
-    update_docker_containers "soup" "" "" "$SOUP_LOG"
+    update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
    set -e
  fi
@@ -878,7 +875,7 @@ main() {
  # Testing that salt-master is up by checking that it is connected to itself
  set +e
  echo "Waiting on the Salt Master service to be ready."
-  salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e
# update the salt-minion configs here and start the minion
@@ -914,7 +911,7 @@ main() {
  set +e
  echo "Waiting on the Salt Master service to be ready."
-  salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e

  echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."


@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-

import logging
from time import sleep
import os

import salt.client

log = logging.getLogger(__name__)

local = salt.client.LocalClient()


def start(interval=60):

    def mine_delete(minion, func):
        log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion))
        local.cmd(minion, 'mine.delete', [func])

    def mine_flush(minion):
        log.warning('checkmine engine: flushing mine cache for %s' % minion)
        local.cmd(minion, 'mine.flush')

    def mine_update(minion):
        log.warning('checkmine engine: updating mine cache for %s' % minion)
        local.cmd(minion, 'mine.update')

    log.info("checkmine engine: started")
    cachedir = __opts__['cachedir']
    while True:
        log.debug('checkmine engine: checking which minions are alive')
        manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False)
        log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived))
        for minion in manage_alived:
            mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p')
            # it is possible that a minion is alive, but hasn't created a mine file yet
            try:
                mine_size = os.path.getsize(mine_path)
                log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size))
                # For some reason the mine file can be corrupt and only be 1 byte in size
                if mine_size == 1:
                    log.error('checkmine engine: found %s to be 1 byte' % mine_path)
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            except FileNotFoundError:
                log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path))
                mine_flush(minion)
                mine_update(minion)
                continue
            # if this is a manager, check that the ca is in the mine and that it is correct
            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
                try:
                    ca_crt = x509[minion]['/etc/pki/ca.crt']
                    log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
                    # since the cert is defined, make sure it is valid
                    import salt.modules.x509_v2 as x509_v2
                    if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
                        log.error("checkmine engine: found minion %s doesn't have a valid ca_crt in the mine" % minion)
                        log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
                        mine_delete(minion, 'x509.get_pem_entries')
                        mine_update(minion)
                        continue
                    else:
                        log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % minion)
                except (KeyError, IndexError):
                    log.error("checkmine engine: found minion %s doesn't have a ca_crt in the mine" % minion)
                    mine_delete(minion, 'x509.get_pem_entries')
                    mine_update(minion)
                    continue
            # Update the mine if the minion doesn't have an ip in the mine
            network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
            try:
                mine_ip = network_ip_addrs[minion][0]
                log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip))
            except (KeyError, IndexError):
                log.error("checkmine engine: found minion %s doesn't have a mine_ip" % minion)
                mine_delete(minion, 'network.ip_addrs')
                mine_update(minion)
        sleep(interval)
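The engine's three remediation helpers map directly onto the mine execution module, so the same repairs can be made by hand when debugging (the minion id is illustrative):

# Drop one cached mine function, flush the whole cache, or repopulate it.
salt 'example_minion' mine.delete x509.get_pem_entries
salt 'example_minion' mine.flush
salt 'example_minion' mine.update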


@@ -0,0 +1,6 @@
engines_dirs:
  - /etc/salt/engines

engines:
  - checkmine:
      interval: 60
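Because the engine now runs inside the salt-master process, it logs through the master's logger; a simple hedged check that it loaded after a restart:

# The engine emits "checkmine engine: started" at info level on load.
grep 'checkmine engine: started' /opt/so/log/salt/master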


@@ -12,22 +12,34 @@ hold_salt_master_package:
    - name: salt-master
{% endif %}

# prior to 2.4.30 this engine ran on the manager with salt-minion
# this has changed to running with the salt-master in 2.4.30
remove_engines_config:
  file.absent:
    - name: /etc/salt/minion.d/engines.conf
    - source: salt://salt/files/engines.conf
    - watch_in:
      - service: salt_minion_service

checkmine_engine:
  file.managed:
    - name: /etc/salt/engines/checkmine.py
    - source: salt://salt/engines/master/checkmine.py
    - makedirs: True

engines_config:
  file.managed:
    - name: /etc/salt/master.d/engines.conf
    - source: salt://salt/files/engines.conf

salt_master_service:
  service.running:
    - name: salt-master
    - enable: True

checkmine_engine:
  file.absent:
    - name: /etc/salt/engines/checkmine.py
    - watch_in:
      - service: salt_minion_service

engines_config:
  file.absent:
    - name: /etc/salt/minion.d/engines.conf
    - watch_in:
      - service: salt_minion_service

    - watch:
      - file: checkmine_engine
      - file: engines_config
    - order: last
{% else %}


@@ -67,6 +67,9 @@ set_log_levels:
- "log_level: info"
- "log_level_logfile: info"
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a sleep timer on service start
# it is not required to restart the service
salt_minion_service_unit_file:
file.managed:
- name: {{ SYSTEMD_UNIT_FILE }}
@@ -89,6 +92,5 @@ salt_minion_service:
- file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels
- file: salt_minion_service_unit_file
{% endif %}
- order: last


@@ -0,0 +1,10 @@
# Malware Hash Registry
## Description
Search Team Cymru's Malware Hash Registry for a file hash.
## Configuration Requirements
None.
**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1.
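MHR is also queryable directly over whois or DNS, which makes the resolver issue above easy to reproduce outside the analyzer; the hash below is the well-known EICAR test file MD5:

# Query MHR over whois (returns last-seen time and detection rate):
whois -h hash.cymru.com 44d88612fea8a8f36de82e1278abb02f
# Query over DNS, optionally pinning the resolver to rule out DNS problems:
dig +short @1.1.1.1 44d88612fea8a8f36de82e1278abb02f.malware.hash.cymru.com TXT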


@@ -13,11 +13,13 @@
{% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %}
{% endfor %}

-{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #}
+{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
{% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
-{%   for m in minions.keys() %}
-{%     do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
-{%   endfor %}
+{%   if node_type in ['heavynode'] %}
+{%     for m in minions.keys() %}
+{%       do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
+{%     endfor %}
+{%   endif %}
{% endfor %}

{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}


@@ -49,12 +49,13 @@ zeek:
      - frameworks/files/hash-all-files
      - frameworks/files/detect-MHR
      - policy/frameworks/notice/extend-email/hostnames
+      - policy/frameworks/notice/community-id
+      - policy/protocols/conn/community-id-logging
      - ja3
      - hassh
      - intel
      - cve-2020-0601
      - securityonion/bpfconf
-      - securityonion/communityid
      - securityonion/file-extraction
      - oui-logging
      - icsnpp-modbus
@@ -75,7 +76,7 @@ zeek:
    - LogAscii::use_json = T;
    - CaptureLoss::watch_interval = 5 mins;
  networks:
    HOME_NET:
      - 192.168.0.0/16
      - 10.0.0.0/8
      - 172.16.0.0/12
@@ -120,4 +121,4 @@ zeek:
- stats
- stderr
- stdout