Merge pull request #11572 from Security-Onion-Solutions/minechanges

Mike Reeves
2023-10-18 19:37:34 -04:00
committed by GitHub
8 changed files with 56 additions and 7 deletions

View File

@@ -50,6 +50,12 @@ pki_public_ca_crt:
       attempts: 5
       interval: 30

+mine_update_ca_crt:
+  module.run:
+    - mine.update: []
+    - onchanges:
+      - x509: pki_public_ca_crt
+
 cakeyperms:
   file.managed:
     - replace: False
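The new mine_update_ca_crt state pushes refreshed mine data (including x509.get_pem_entries) whenever the CA certificate changes, so other minions never pull a stale CA out of the mine. A minimal way to confirm the refresh from the manager, shown as a sketch using the standard Salt CLI; '<manager-id>' is a placeholder, not a name from this PR:

    # Inspect the CA entry the mine currently holds for the manager.
    salt-run mine.get '<manager-id>' x509.get_pem_entries

    # Trigger the same refresh the module.run state performs:
    salt '<manager-id>' mine.update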

View File

@@ -153,8 +153,8 @@ check_salt_master_status() {
 }

 check_salt_minion_status() {
-  local timeout=$1
+  local timeout="${1:-5}"
   echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
   salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
   local status=$?
   if [ $status -gt 0 ]; then
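The ${1:-5} expansion gives timeout a default of 5 seconds, so check_salt_minion_status can now be called with no argument, as the new wait loop in so-functions does. A quick sketch of the idiom:

    # Bash default-value expansion: use $1 if supplied, else fall back to 5.
    demo() {
        local timeout="${1:-5}"
        echo "timeout=$timeout"
    }
    demo       # prints timeout=5
    demo 30    # prints timeout=30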

View File

@@ -6,6 +6,9 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

+# include ssl since docker service requires the intca
+include:
+  - ssl

 dockergroup:
   group.present:
@@ -86,6 +89,11 @@ docker_running:
     - enable: True
     - watch:
       - file: docker_daemon
+      - x509: trusttheca
+    - require:
+      - file: docker_daemon
+      - x509: trusttheca
+
 # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
 # 57314 = Strelka, 47760-47860 = Zeek
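Adding trusttheca under both watch and require changes docker_running in two ways: require gates and orders the docker service behind the CA certificate state, while watch additionally restarts the service whenever trusttheca reports changes. One way to preview the effect without touching the running service, assuming a standard salt-call on the host:

    # Dry-run the docker state to see whether docker_running would restart
    # after a CA change; no states are actually applied.
    salt-call state.apply docker test=True

    # Render the compiled state, including the new ssl include:
    salt-call state.show_sls docker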

View File

@@ -578,7 +578,7 @@ update_centos_repo() {
 }

 update_salt_mine() {
-  echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+  echo "Populating the mine with mine_functions for each host."
   set +e
   salt \* mine.update -b 50
   set -e
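The reworded message reflects that mine.update pushes every function configured in mine_functions, not just network.ip_addrs, and the -b 50 flag batches the job across 50 minions at a time so a large grid does not flood the master. A hedged way to inspect what each minion publishes:

    # Show the configured mine functions (from minion config or pillar)
    # and what the mine currently returns for one of them.
    salt '*' config.get mine_functions
    salt-run mine.get '*' network.ip_addrs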

View File

@@ -46,6 +46,28 @@ def start(interval=60):
                 mine_update(minion)
                 continue

+            # if a manager, check that the ca is in the mine and that it is correct
+            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
+                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
+                try:
+                    ca_crt = x509[minion]['/etc/pki/ca.crt']
+                    log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
+                    # since the cert is defined, make sure it is valid
+                    import salt.modules.x509_v2 as x509_v2
+                    if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
+                        log.error('checkmine engine: found minion %s doesn\'t have a valid ca_crt in the mine' % (minion))
+                        log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
+                        mine_delete(minion, 'x509.get_pem_entries')
+                        mine_update(minion)
+                        continue
+                    else:
+                        log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
+                except KeyError:
+                    log.error('checkmine engine: found minion %s doesn\'t have a ca_crt in the mine' % (minion))
+                    mine_delete(minion, 'x509.get_pem_entries')
+                    mine_update(minion)
+                    continue
+
             # Update the mine if the ip in the mine doesn't match returned from manage.alived
             network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
             try:
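verify_private_key from salt.modules.x509_v2 confirms that the public key embedded in /etc/pki/ca.crt pairs with /etc/pki/ca.key. A manual equivalent, shown as a sketch under the assumption that openssl is installed on the manager, compares the two public keys directly:

    # The cert and key match when their extracted public keys are identical.
    crt_pub=$(openssl x509 -noout -pubkey -in /etc/pki/ca.crt)
    key_pub=$(openssl pkey -pubout -in /etc/pki/ca.key)
    if [ "$crt_pub" = "$key_pub" ]; then
        echo "ca.crt matches ca.key"
    else
        echo "ca.crt does NOT match ca.key"
    fi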

View File

@@ -2495,6 +2495,16 @@ wait_for_file() {

 wait_for_salt_minion() {
   retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
+  local attempt=0
+  # each attempt takes about 15 seconds
+  local maxAttempts=20
+  until check_salt_minion_status; do
+    attempt=$((attempt+1))
+    if [[ $attempt -gt $maxAttempts ]]; then
+      fail_setup
+    fi
+    sleep 10
+  done
 }

 verify_setup() {
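With check_salt_minion_status defaulting to a 5-second ping timeout plus the 10-second sleep between passes, each attempt costs roughly 15 seconds, so 20 attempts bound the wait at about five minutes before fail_setup fires. The same bounded-until pattern, generalized as a sketch; wait_until is a hypothetical helper, not part of so-functions:

    # wait_until <max_attempts> <sleep_seconds> <command ...>
    wait_until() {
        local max=$1 pause=$2; shift 2
        local n=0
        until "$@"; do
            n=$((n+1))
            if [[ $n -gt $max ]]; then
                return 1
            fi
            sleep "$pause"
        done
    }
    # e.g. wait_until 20 10 check_salt_minion_status || fail_setup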

View File

@@ -714,12 +714,17 @@ if ! [[ -f $install_opt_file ]]; then
   logCmd "salt-call state.apply common.packages"
   logCmd "salt-call state.apply common"
+  # this will apply the salt.minion state first since salt.master includes salt.minion
   logCmd "salt-call state.apply salt.master"
   # wait here until we get a response from the salt-master since it may have just restarted
   # exit setup after 5-6 minutes of trying
   check_salt_master_status || fail "Can't access salt master or it is not ready"
+  # apply the ca state to create the ca and put it in the mine early in the install
+  # the minion ip will already be in the mine from the configure_minion function in so-functions
+  generate_ca
+  # this will also call the ssl state since docker requires the intca
+  # the salt-minion service will need to be up on the manager to sign requests
+  generate_ssl
   logCmd "salt-call state.apply docker"
   firewall_generate_templates
   set_initial_firewall_policy
@@ -727,8 +732,6 @@ if ! [[ -f $install_opt_file ]]; then
   title "Downloading Elastic Agent Artifacts"
   download_elastic_agent_artifacts
-  generate_ca
-  generate_ssl
   logCmd "salt-call state.apply -l info firewall"
   # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
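Moving generate_ca and generate_ssl ahead of the docker state means the CA exists and is in the mine before docker_running requires and watches trusttheca. Their bodies are not shown in this diff, so treating them as wrappers around state applies is an assumption; under it, the reordered install flow is roughly:

    # Sketch of the new ordering; the ca/ssl state.apply targets are
    # assumptions, since generate_ca/generate_ssl are defined elsewhere.
    salt-call state.apply salt.master        # may restart the salt-master
    check_salt_master_status || fail "..."   # wait until the master answers
    salt-call state.apply ca                 # assumed body of generate_ca
    salt-call state.apply ssl                # assumed body of generate_ssl
    salt-call state.apply docker             # trusttheca is now satisfiable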