Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Commit: Merge remote-tracking branch 'origin/2.4/dev' into upgrade/salt3006.3
@@ -1,18 +1,18 @@
-### 2.4.20-20231006 ISO image released on 2023/10/06
+### 2.4.20-20231012 ISO image released on 2023/10/12
 
 
 ### Download and Verify
 
-2.4.20-20231006 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
+2.4.20-20231012 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso
 
-MD5: 269F00308C53976BF0EAE788D1DB29DB
-SHA1: 3F7C2324AE1271112F3B752BA4724AF36688FC27
-SHA256: 542B8B3F4F75AD24DC78007F8FE0857E00DC4CC9F4870154DCB8D5D0C4144B65
+MD5: 7D6ACA843068BA9432B3FF63BFD1EF0F
+SHA1: BEF2B906066A1B04921DF0B80E7FDD4BC8ECED5C
+SHA256: 5D511D50F11666C69AE12435A47B9A2D30CB3CC88F8D38DC58A5BC0ECADF1BF5
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.20-20231006.iso.sig securityonion-2.4.20-20231006.iso
+gpg --verify securityonion-2.4.20-20231012.iso.sig securityonion-2.4.20-20231012.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Tue 03 Oct 2023 11:40:51 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 12 Oct 2023 01:28:32 PM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
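Before `gpg --verify` can succeed, the signing key has to be in the local keyring. A minimal sketch, assuming the KEYS file was already fetched with the wget command shown in the hunk above:

```
# Import the Security Onion signing key into the local GPG keyring
gpg --import KEYS

# Then verify the ISO against its detached signature
gpg --verify securityonion-2.4.20-20231012.iso.sig securityonion-2.4.20-20231012.iso
```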
@@ -7,19 +7,23 @@
        tgt_type='compound') | dictsort()
 %}
 
-{% set hostname = cached_grains[minionid]['host'] %}
-{% set node_type = minionid.split('_')[1] %}
-{% if node_type not in node_types.keys() %}
-{% do node_types.update({node_type: {hostname: ip[0]}}) %}
-{% else %}
-{% if hostname not in node_types[node_type] %}
-{% do node_types[node_type].update({hostname: ip[0]}) %}
-{% else %}
-{% do node_types[node_type][hostname].update(ip[0]) %}
-{% endif %}
-{% endif %}
+
+# only add a node to the pillar if it returned an ip from the mine
+{% if ip | length > 0 %}
+{% set hostname = cached_grains[minionid]['host'] %}
+{% set node_type = minionid.split('_')[1] %}
+{% if node_type not in node_types.keys() %}
+{% do node_types.update({node_type: {hostname: ip[0]}}) %}
+{% else %}
+{% if hostname not in node_types[node_type] %}
+{% do node_types[node_type].update({hostname: ip[0]}) %}
+{% else %}
+{% do node_types[node_type][hostname].update(ip[0]) %}
+{% endif %}
+{% endif %}
+{% endif %}
 {% endfor %}
 
 logstash:
   nodes:
 {% for node_type, values in node_types.items() %}
@@ -4,18 +4,22 @@
 {% set hostname = minionid.split('_')[0] %}
 {% set node_type = minionid.split('_')[1] %}
 {% set is_alive = False %}
-{% if minionid in manage_alived.keys() %}
-{% if ip[0] == manage_alived[minionid] %}
-{% set is_alive = True %}
-{% endif %}
-{% endif %}
-{% if node_type not in node_types.keys() %}
-{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
-{% else %}
-{% if hostname not in node_types[node_type] %}
-{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
-{% else %}
-{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
-{% endif %}
-{% endif %}
+
+# only add a node to the pillar if it returned an ip from the mine
+{% if ip | length > 0 %}
+{% if minionid in manage_alived.keys() %}
+{% if ip[0] == manage_alived[minionid] %}
+{% set is_alive = True %}
+{% endif %}
+{% endif %}
+{% if node_type not in node_types.keys() %}
+{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
+{% else %}
+{% if hostname not in node_types[node_type] %}
+{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
+{% else %}
+{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
+{% endif %}
+{% endif %}
+{% endif %}
 {% endfor %}
@@ -50,6 +50,12 @@ pki_public_ca_crt:
     - attempts: 5
     - interval: 30
 
+mine_update_ca_crt:
+  module.run:
+    - mine.update: []
+    - onchanges:
+      - x509: pki_public_ca_crt
+
 cakeyperms:
   file.managed:
     - replace: False
@@ -8,7 +8,7 @@
 # Elastic agent is not managed by salt. Because of this we must store this base information in a
 # script that accompanies the soup system. Since so-common is one of those special soup files,
 # and since this same logic is required during installation, it's included in this file.
-ELASTIC_AGENT_TARBALL_VERSION="8.8.2"
+ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
 ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
 ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
 ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
@@ -133,15 +133,27 @@ check_elastic_license() {
 }
 
 check_salt_master_status() {
-  local timeout=$1
-  echo "Checking if we can talk to the salt master"
-  salt-call state.show_top concurrent=true
-
-  return
+  local count=0
+  local attempts="${1:-10}"
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Checking if we can access the salt master and that it is ready at: ${current_time}"
+  while ! salt-call state.show_top -l error concurrent=true 1> /dev/null; do
+    current_time="$(date '+%b %d %H:%M:%S')"
+    echo "Can't access salt master or it is not ready at: ${current_time}"
+    ((count+=1))
+    if [[ $count -eq $attempts ]]; then
+      # 10 attempts takes about 5.5 minutes
+      echo "Gave up trying to access salt-master"
+      return 1
+    fi
+  done
+  current_time="$(date '+%b %d %H:%M:%S')"
+  echo "Successfully accessed the salt master and it is ready at: ${current_time}"
+  return 0
 }
 
 check_salt_minion_status() {
-  local timeout=$1
+  local timeout="${1:-5}"
   echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
   salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
   local status=$?
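For reference, the reworked function now takes an optional attempt count as its first argument (defaulting to 10, roughly 5.5 minutes of retries). A usage sketch based on the calls that appear later in this same diff:

```
# Single attempt: fail fast if the master isn't ready (used early in soup)
check_salt_master_status 1 || fail "Could not talk to salt master"

# Default of 10 attempts: ride out a salt-master restart (used later in soup and setup)
check_salt_master_status || fail "Can't access salt master or it is not ready"
```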
@@ -137,7 +137,7 @@ update_docker_containers() {
 for i in "${TRUSTED_CONTAINERS[@]}"
 do
   if [ -z "$PROGRESS_CALLBACK" ]; then
-      echo "Downloading $i" >> "$LOG_FILE" 2>&1
+    echo "Downloading $i" >> "$LOG_FILE" 2>&1
   else
     $PROGRESS_CALLBACK $i
   fi
@@ -136,6 +136,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
 fi
 
 if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then

@@ -230,4 +231,4 @@ else
   echo -e "\nResult: One or more errors found"
 fi
 
-exit $RESULT
\ No newline at end of file
+exit $RESULT
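For context, these patterns feed an egrep-style exclusion over candidate error lines. A hypothetical sketch of how such a filter step is typically applied ($ERROR_LOG and the initial "error" grep are stand-ins, not this script's actual variable names):

```
# Hypothetical illustration: find error-like lines, then drop the excluded false positives
grep -iE "error" "$ERROR_LOG" | grep -vE "$EXCLUDED_ERRORS"
```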
@@ -6,6 +6,9 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
+# include ssl since docker service requires the intca
+include:
+  - ssl
+
 dockergroup:
   group.present:

@@ -86,6 +89,11 @@ docker_running:
     - enable: True
     - watch:
       - file: docker_daemon
+      - x509: trusttheca
     - require:
       - file: docker_daemon
+      - x509: trusttheca
+
+# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
+# 57314 = Strelka, 47760-47860 = Zeek
@@ -5,7 +5,7 @@
   "package": {
     "name": "endpoint",
     "title": "Elastic Defend",
-    "version": "8.8.0"
+    "version": "8.10.2"
   },
   "enabled": true,
   "policy_id": "endpoints-initial",
18  salt/elasticfleet/tools/sbin/so-elastic-fleet-common  (Executable file → Normal file)
@@ -42,6 +42,23 @@ elastic_fleet_integration_create() {
   curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 }
 
+elastic_fleet_integration_remove() {
+
+  AGENT_POLICY=$1
+  NAME=$2
+
+  INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$AGENT_POLICY" | jq -r '.item.package_policies[] | select(.name=="'"$NAME"'") | .id')
+
+  JSON_STRING=$( jq -n \
+                  --arg INTEGRATIONID "$INTEGRATION_ID" \
+                  '{"packagePolicyIds":[$INTEGRATIONID]}'
+  )
+
+  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
+}
+
 elastic_fleet_integration_update() {
 
   UPDATE_ID=$1

@@ -98,3 +115,4 @@ elastic_fleet_policy_update() {
 
   curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 }
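The new remove helper takes an agent policy name and an integration name, resolves the integration ID via so-elastic-fleet-agent-policy-view, and posts that ID to the Fleet package-policy delete endpoint. A usage sketch, matching the call made in soup's apply_hotfix further down in this diff:

```
# Source the helper functions, then remove the Elastic Defend integration
# from the endpoints-initial agent policy
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
```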
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+# Usage: Run with --force to update the Elastic Defend integration policy
+
+. /usr/sbin/so-elastic-fleet-common
+
+# Manage Elastic Defend Integration for Initial Endpoints Policy
+for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
+do
+  printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
+  elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
+  if [ -n "$INTEGRATION_ID" ]; then
+    if [ "$1" = "--force" ]; then
+      printf "\n\nIntegration $NAME exists - Updating integration\n"
+      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
+    else
+      printf "\n\nIntegration $NAME exists - Not updating - rerun with --force to force the update.\n"
+    fi
+  else
+    printf "\n\nIntegration does not exist - Creating integration\n"
+    elastic_fleet_integration_create "@$INTEGRATION"
+  fi
+done
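Per the script's own usage comment, an existing integration is left untouched unless the update is forced:

```
# Create the Elastic Defend integration if missing; leave an existing one as-is
so-elastic-fleet-integration-policy-elastic-defend

# Force an update of an existing Elastic Defend integration policy
so-elastic-fleet-integration-policy-elastic-defend --force
```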
4  salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load  (Executable file → Normal file)
@@ -12,6 +12,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
   # First, check for any package upgrades
   /usr/sbin/so-elastic-fleet-package-upgrade
 
+  # Second, configure the Elastic Defend integration separately
+  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
+
   # Initial Endpoints
   for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
   do

@@ -65,3 +68,4 @@ else
   exit $RETURN_CODE
 fi
+
@@ -1 +1 @@
-{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
+{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
@@ -63,7 +63,7 @@ update() {
 
 IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
 for i in "${LINES[@]}"; do
-  RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
+  RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
   echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
 done
@@ -578,7 +578,7 @@ update_centos_repo() {
 }
 
 update_salt_mine() {
-  echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+  echo "Populating the mine with mine_functions for each host."
   set +e
   salt \* mine.update -b 50
   set -e
@@ -691,13 +691,16 @@ verify_latest_update_script() {
 
 # Keeping this block in case we need to do a hotfix that requires salt update
 apply_hotfix() {
-#  if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
-#    fix_wazuh
+  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
+    salt-call state.apply elasticfleet -l info queue=True
+    . /usr/sbin/so-elastic-fleet-common
+    elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
+    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
 #  elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
 #    2_3_10_hotfix_1
-#  else
+  else
     echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
-#  fi
+  fi
 }
@@ -733,14 +736,8 @@ main() {
   echo ""
 
   set_os
-  if ! check_salt_master_status; then
-    echo "Could not talk to salt master"
-    echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
-    echo "SOUP will now attempt to start the salt-master service and exit."
-    exit 1
-  fi
-
-  echo "This node can communicate with the salt-master."
+  check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
 
   echo "Checking to see if this is a manager."
   echo ""
@@ -826,7 +823,7 @@ main() {
   else
     update_registry
     set +e
-    update_docker_containers "soup" "" "" "$SOUP_LOG"
+    update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
     set -e
   fi
@@ -878,7 +875,7 @@ main() {
   # Testing that the salt-master is up by checking that it is connected to itself
   set +e
   echo "Waiting on the Salt Master service to be ready."
-  salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
   set -e
 
   # update the salt-minion configs here and start the minion
@@ -914,7 +911,7 @@ main() {
 
   set +e
   echo "Waiting on the Salt Master service to be ready."
-  salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
   set -e
 
   echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
81  salt/salt/engines/master/checkmine.py  (new Normal file)
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+import logging
+from time import sleep
+import os
+import salt.client
+
+log = logging.getLogger(__name__)
+local = salt.client.LocalClient()
+
+def start(interval=60):
+    def mine_delete(minion, func):
+        log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion))
+        local.cmd(minion, 'mine.delete', [func])
+
+    def mine_flush(minion):
+        log.warning('checkmine engine: flushing mine cache for %s' % minion)
+        local.cmd(minion, 'mine.flush')
+
+    def mine_update(minion):
+        log.warning('checkmine engine: updating mine cache for %s' % minion)
+        local.cmd(minion, 'mine.update')
+
+    log.info("checkmine engine: started")
+    cachedir = __opts__['cachedir']
+    while True:
+        log.debug('checkmine engine: checking which minions are alive')
+        manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False)
+        log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived))
+
+        for minion in manage_alived:
+            mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p')
+            # it is possible that a minion is alive, but hasn't created a mine file yet
+            try:
+                mine_size = os.path.getsize(mine_path)
+                log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size))
+                # For some reason the mine file can be corrupt and only be 1 byte in size
+                if mine_size == 1:
+                    log.error('checkmine engine: found %s to be 1 byte' % mine_path)
+                    mine_flush(minion)
+                    mine_update(minion)
+                    continue
+            except FileNotFoundError:
+                log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path))
+                mine_flush(minion)
+                mine_update(minion)
+                continue
+
+            # if a manager, check that the ca is in the mine and that it is correct
+            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
+                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
+                try:
+                    ca_crt = x509[minion]['/etc/pki/ca.crt']
+                    log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
+                    # since the cert is defined, make sure it is valid
+                    import salt.modules.x509_v2 as x509_v2
+                    if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
+                        log.error('checkmine engine: found minion %s doesn\'t have a valid ca_crt in the mine' % (minion))
+                        log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
+                        mine_delete(minion, 'x509.get_pem_entries')
+                        mine_update(minion)
+                        continue
+                    else:
+                        log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
+                except (KeyError, IndexError):
+                    log.error('checkmine engine: found minion %s doesn\'t have a ca_crt in the mine' % (minion))
+                    mine_delete(minion, 'x509.get_pem_entries')
+                    mine_update(minion)
+                    continue
+
+            # Update the mine if the ip in the mine doesn't match what's returned from manage.alived
+            network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
+            try:
+                mine_ip = network_ip_addrs[minion][0]
+                log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip))
+            except IndexError:
+                log.error('checkmine engine: found minion %s doesn\'t have a mine_ip' % (minion))
+                mine_delete(minion, 'network.ip_addrs')
+                mine_update(minion)
+
+        sleep(interval)
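To sanity-check the engine after the salt-master picks it up, the runner and mine calls it relies on can be exercised by hand. A sketch using standard Salt commands (the minion ID shown is hypothetical; the engine itself logs to the master log):

```
# List minions the engine considers alive (same runner the engine calls)
salt-run manage.alived

# Inspect what a minion has in the mine for the functions the engine checks
salt-run mine.get 'somehost_manager' x509.get_pem_entries   # hypothetical minion ID
salt-run mine.get 'somehost_manager' network.ip_addrs

# Force a mine refresh, as the engine does after flushing a bad entry
salt 'somehost_manager' mine.update
```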
6  salt/salt/files/engines.conf  (new Normal file)
@@ -0,0 +1,6 @@
+engines_dirs:
+  - /etc/salt/engines
+
+engines:
+  - checkmine:
+      interval: 60
@@ -12,22 +12,34 @@ hold_salt_master_package:
     - name: salt-master
 {% endif %}
 
+# prior to 2.4.30 this engine ran on the manager with salt-minion
+# this has changed to running with the salt-master in 2.4.30
+remove_engines_config:
+  file.absent:
+    - name: /etc/salt/minion.d/engines.conf
-    - source: salt://salt/files/engines.conf
-    - watch_in:
-      - service: salt_minion_service
+
+checkmine_engine:
+  file.managed:
+    - name: /etc/salt/engines/checkmine.py
+    - source: salt://salt/engines/master/checkmine.py
+    - makedirs: True
+
+engines_config:
+  file.managed:
+    - name: /etc/salt/master.d/engines.conf
+    - source: salt://salt/files/engines.conf
+
+salt_master_service:
+  service.running:
+    - name: salt-master
+    - enable: True
-
-checkmine_engine:
-  file.absent:
-    - name: /etc/salt/engines/checkmine.py
-    - watch_in:
-      - service: salt_minion_service
-
-engines_config:
-  file.absent:
-    - name: /etc/salt/minion.d/engines.conf
-    - watch_in:
-      - service: salt_minion_service
+    - watch:
+      - file: checkmine_engine
+      - file: engines_config
+    - order: last
 
 {% else %}
@@ -67,6 +67,9 @@ set_log_levels:
     - "log_level: info"
     - "log_level_logfile: info"
 
+# prior to 2.4.30 this managed file would restart the salt-minion service when updated
+# since this file is currently only adding a sleep timer on service start
+# it is not required to restart the service
 salt_minion_service_unit_file:
   file.managed:
     - name: {{ SYSTEMD_UNIT_FILE }}

@@ -89,6 +92,5 @@ salt_minion_service:
       - file: mine_functions
 {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
       - file: set_log_levels
-      - file: salt_minion_service_unit_file
 {% endif %}
     - order: last
10  salt/sensoroni/files/analyzers/malwarehashregistry/README.md  (new Normal file)
@@ -0,0 +1,10 @@
+# Malware Hash Registry
+
+## Description
+Search Team Cymru's Malware Hash Registry for a file hash.
+
+## Configuration Requirements
+
+None.
+
+**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1.
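For context, Malware Hash Registry lookups go over DNS, which is why resolver choice matters here. Team Cymru's MHR can be queried by hand with dig; a sketch using a placeholder hash (swap in a real MD5 or SHA1):

```
# TXT answer format: <last-seen epoch timestamp> <detection percentage>
# 0123456789abcdef0123456789abcdef is a placeholder hash, not a real sample
dig +short 0123456789abcdef0123456789abcdef.malware.hash.cymru.com TXT
```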
@@ -13,11 +13,13 @@
 {% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %}
 {% endfor %}
 
-{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #}
+{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
 {% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
-{% for m in minions.keys() %}
-{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
-{% endfor %}
+{% if node_type in ['heavynode'] %}
+{% for m in minions.keys() %}
+{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
+{% endfor %}
+{% endif %}
 {% endfor %}
 
 {% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}
@@ -49,12 +49,13 @@ zeek:
       - frameworks/files/hash-all-files
       - frameworks/files/detect-MHR
       - policy/frameworks/notice/extend-email/hostnames
+      - policy/frameworks/notice/community-id
+      - policy/protocols/conn/community-id-logging
       - ja3
       - hassh
       - intel
       - cve-2020-0601
       - securityonion/bpfconf
-      - securityonion/communityid
       - securityonion/file-extraction
       - oui-logging
      - icsnpp-modbus

@@ -75,7 +76,7 @@ zeek:
       - LogAscii::use_json = T;
       - CaptureLoss::watch_interval = 5 mins;
   networks:
-    HOME_NET:
+    HOME_NET:
       - 192.168.0.0/16
       - 10.0.0.0/8
       - 172.16.0.0/12

@@ -120,4 +121,4 @@ zeek:
       - stats
       - stderr
       - stdout
-
+
@@ -2111,11 +2111,6 @@ saltify() {
 
 }
 
-# Run a salt command to generate the minion key
-salt_firstcheckin() {
-  salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
-}
-
 salt_install_module_deps() {
   logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
   logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"

@@ -2500,6 +2495,16 @@ wait_for_file() {
 
 wait_for_salt_minion() {
-  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
+  local attempt=0
+  # each attempt takes about 15 seconds
+  local maxAttempts=20
+  until check_salt_minion_status; do
+    attempt=$((attempt+1))
+    if [[ $attempt -gt $maxAttempts ]]; then
+      fail_setup
+    fi
+    sleep 10
+  done
 }
 
 verify_setup() {
@@ -91,7 +91,7 @@ fi
 # if packages are updated and the box isn't rebooted
 if [[ $is_debian ]]; then
   update_packages
-  if [[ -f "/var/run/reboot-required" ]]; then
+  if [[ -f "/var/run/reboot-required" ]] && [ -z "$test_profile" ]; then
     whiptail_debian_reboot_required
     reboot
   fi
@@ -714,6 +714,17 @@ if ! [[ -f $install_opt_file ]]; then
 
   logCmd "salt-call state.apply common.packages"
   logCmd "salt-call state.apply common"
+  # this will apply the salt.minion state first since salt.master includes salt.minion
+  logCmd "salt-call state.apply salt.master"
+  # wait here until we get a response from the salt-master since it may have just restarted
+  # exit setup after 5-6 minutes of trying
+  check_salt_master_status || fail "Can't access salt master or it is not ready"
+  # apply the ca state to create the ca and put it in the mine early in the install
+  # the minion ip will already be in the mine from the configure_minion function in so-functions
+  generate_ca
+  # this will also call the ssl state since docker requires the intca
+  # the salt-minion service will need to be up on the manager to sign requests
+  generate_ssl
   logCmd "salt-call state.apply docker"
   firewall_generate_templates
   set_initial_firewall_policy
@@ -721,8 +732,6 @@ if ! [[ -f $install_opt_file ]]; then
   title "Downloading Elastic Agent Artifacts"
   download_elastic_agent_artifacts
 
-  generate_ca
-  generate_ssl
   logCmd "salt-call state.apply -l info firewall"
 
   # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
@@ -768,8 +777,6 @@ if ! [[ -f $install_opt_file ]]; then
   checkin_at_boot
   set_initial_firewall_access
   logCmd "salt-call schedule.enable -linfo --local"
-  systemctl restart salt-master
-  systemctl restart salt-minion
   verify_setup
 else
   touch /root/accept_changes
BIN  sigs/securityonion-2.4.20-20231012.iso.sig  (new Normal file, binary file not shown)