Merge branch 'dev' into kilo

This commit is contained in:
Jason Ertel
2022-01-07 13:41:35 -05:00
15 changed files with 564 additions and 185 deletions

4
salt/ca/dirs.sls Normal file
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True

View File

@@ -1,3 +1,6 @@
mine_functions:
x509.get_pem_entries: [/etc/pki/ca.crt]
x509_signing_policies:
filebeat:
- minions: '*'

View File

@@ -1,17 +1,14 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
include:
- ca.dirs
{% set manager = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
/etc/pki:
file.directory: []
/etc/pki/issued_certs:
file.directory: []
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
@@ -42,7 +39,7 @@ pki_public_ca_crt:
- backup: True
- replace: False
- require:
- file: /etc/pki
- sls: ca.dirs
- timeout: 30
- retry:
attempts: 5

View File

@@ -108,7 +108,7 @@ CANCURL=$(curl -sI https://securityonionsolutions.com/ | grep "200 OK")
while [[ $CURLCONTINUE != "yes" ]] && [[ $CURLCONTINUE != "no" ]]; do
if [[ "$FIRSTPASS" == "yes" ]]; then
echo "We could not access https://securityonionsolutions.com/."
echo "Since packages are downloaded from the internet, internet acceess is required."
echo "Since packages are downloaded from the internet, internet access is required."
echo "If you would like to ignore this warning and continue anyway, please type 'yes'."
echo "Otherwise, type 'no' to exit."
FIRSTPASS=no

View File

@@ -298,6 +298,7 @@ retry() {
sleepDelay=$2
cmd=$3
expectedOutput=$4
failedOutput=$5
attempt=0
local exitcode=0
while [[ $attempt -lt $maxAttempts ]]; do
@@ -308,12 +309,28 @@ retry() {
echo "Results: $output ($exitcode)"
if [ -n "$expectedOutput" ]; then
if [[ "$output" =~ "$expectedOutput" ]]; then
return $exitCode
return $exitcode
else
echo "Expected '$expectedOutput' but got '$output'"
echo "Did not find expectedOutput: '$expectedOutput' in the output below from running the command: '$cmd'"
echo "<Start of output>"
echo "$output"
echo "<End of output>"
fi
elif [ -n "$failedOutput" ]; then
if [[ "$output" =~ "$failedOutput" ]]; then
echo "Found failedOutput: '$failedOutput' in the output below from running the command: '$cmd'"
echo "<Start of output>"
echo "$output"
echo "<End of output>"
if [[ $exitcode -eq 0 ]]; then
echo "The exitcode was 0, but we are setting to 1 since we found $failedOutput in the output."
exitcode=1
fi
else
return $exitcode
fi
elif [[ $exitcode -eq 0 ]]; then
return $exitCode
return $exitcode
fi
echo "Command failed with exit code $exitcode; will retry in $sleepDelay seconds ($attempt / $maxAttempts)..."
sleep $sleepDelay

View File

@@ -399,6 +399,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
[[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
true
}
@@ -410,6 +411,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
[[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
[[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
[[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
true
}
@@ -459,11 +461,12 @@ post_to_2.3.90() {
fi
fi
POSTVERSION=2.3.90
}
post_to_2.3.100() {
echo "Post Processing for .100"
}
up_to_2.3.20(){
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
@@ -615,6 +618,9 @@ up_to_2.3.90() {
fi
done
# There was a bug in 2.3.0 so-firewall addhostgroup that was resolved in 2.3.1 - commit 32294eb2ed30ac74b15bb4bfab687084a928daf2
echo "Verify so-firewall is up to date"
verify_latest_so-firewall_script
# Create Endgame Hostgroup
echo "Adding endgame hostgroup with so-firewall"
if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then
@@ -657,6 +663,14 @@ up_to_2.3.90() {
INSTALLEDVERSION=2.3.90
}
up_to_2.3.100() {
echo "Updating Security Onion to 2.3.100"
echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
set +e
salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"'
set -e
fix_wazuh
}
verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
@@ -853,10 +867,34 @@ verify_latest_update_script() {
fi
}
verify_latest_so-firewall_script() {
# Check to see if the so-firewall script matches. If not run the new one.
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-firewall | awk '{print $1}')
if [[ "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the so-firewall script is up to date. Proceeding."
else
echo "You are not running the latest version of so-firewall. Updating so-firewall."
cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall /usr/sbin/
echo ""
echo "so-firewall has been updated."
fi
}
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
fix_wazuh
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
}
fix_wazuh() {
FILE="/nsm/wazuh/etc/ossec.conf"
echo "Detecting if ossec.conf needs corrected..."
echo "Detecting if $FILE needs corrected..."
if [ -f "$FILE" ]; then
if head -1 $FILE | grep -q "xml version"; then
echo "$FILE has an XML header; removing"
sed -i 1d $FILE
@@ -865,11 +903,10 @@ apply_hotfix() {
echo "$FILE does not have an XML header, so no changes are necessary."
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
echo "$FILE does not exist, so no changes are necessary."
fi
}
main() {
trap 'check_err $?' EXIT
@@ -1178,8 +1215,16 @@ Please review the following for more information about the update process and re
https://docs.securityonion.net/soup
https://blog.securityonion.net
Press Enter to continue or Ctrl-C to cancel.
EOF
if [ -n "$BRANCH" ]; then
cat << EOF
SOUP will use the $BRANCH branch.
EOF
fi
cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF
read -r input

View File

@@ -56,6 +56,12 @@ elasticsearch:
query:
bool:
max_clause_count: 1500
id_field_data:
enabled: false
logger:
org:
elasticsearch:
deprecation: ERROR

View File

@@ -337,6 +337,8 @@ so-elasticsearch-pipelines:
- file: esingestdynamicconf
- file: esyml
- file: so-elasticsearch-pipelines-file
- require:
- docker_container: so-elasticsearch
{% if TEMPLATES %}
so-elasticsearch-templates:
@@ -344,6 +346,8 @@ so-elasticsearch-templates:
- name: /usr/sbin/so-elasticsearch-templates-load
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
{% endif %}
so-elasticsearch-roles-load:
@@ -351,6 +355,8 @@ so-elasticsearch-roles-load:
- name: /usr/sbin/so-elasticsearch-roles-load
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
{% endif %} {# if grains['role'] != 'so-helix' #}

View File

@@ -167,7 +167,54 @@
},
"agent": {
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"version": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"as":{
"type":"object",
@@ -226,16 +273,163 @@
"dynamic": true
},
"ecs": {
"type":"object",
"dynamic": true
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"error":{
"type":"object",
"dynamic": true
},
"event": {
"type":"object",
"dynamic": true
"properties": {
"action": {
"ignore_above": 1024,
"type": "keyword"
},
"category": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"code": {
"ignore_above": 1024,
"type": "keyword"
},
"created": {
"type": "date",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"dataset": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"duration": {
"type": "long"
},
"end": {
"type": "date"
},
"hash": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ingested": {
"type": "date",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"kind": {
"ignore_above": 1024,
"type": "keyword"
},
"module": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"original": {
"doc_values": false,
"ignore_above": 1024,
"index": false,
"type": "keyword"
},
"outcome": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"reference": {
"ignore_above": 1024,
"type": "keyword"
},
"risk_score": {
"type": "float"
},
"risk_score_norm": {
"type": "float"
},
"sequence": {
"type": "long"
},
"severity": {
"type": "long"
},
"severity_label": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"start": {
"type": "date"
},
"timezone": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"url": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event_data":{
"type":"object",
@@ -267,11 +461,97 @@
},
"host":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"http":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"request": {
"properties": {
"body": {
"properties": {
"bytes": {
"type": "long"
},
"content": {
"fields": {
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"bytes": {
"type": "long"
},
"method": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"referrer": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"response": {
"properties": {
"body": {
"properties": {
"bytes": {
"type": "long"
},
"content": {
"fields": {
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"bytes": {
"type": "long"
},
"status_code": {
"type": "long"
}
}
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"import":{
"type":"object",
@@ -318,7 +598,18 @@
},
"log":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"logscan": {
"type": "object",
@@ -436,7 +727,27 @@
},
"service":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"sip":{
"type":"object",
@@ -464,7 +775,18 @@
},
"source":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties" : {
"address": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"ssh":{
"type":"object",
@@ -479,7 +801,8 @@
"dynamic": true
},
"tags": {
"type":"text",
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
@@ -508,7 +831,22 @@
},
"user_agent":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties": {
"original": {
"fields": {
"keyword": {
"type": "keyword"
},
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"version":{
"type":"object",

File diff suppressed because one or more lines are too long

View File

@@ -219,6 +219,8 @@ path.logs: /var/log/logstash
# path.plugins: []
{% set pipeline_workers = salt['pillar.get']('logstash_settings:ls_pipeline_workers', '1') %}
{% set pipeline_batch = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', '125') %}
{% set pipeline_ecs_compatibility = salt['pillar.get']('logstash_settings:ls_ecs_compatibility', 'disabled') %}
pipeline.workers: {{ pipeline_workers }}
pipeline.batch.size: {{ pipeline_batch }}
pipeline.ecs_compatibility: {{ pipeline_ecs_compatibility }}

View File

@@ -16,12 +16,14 @@
{% endif %}
{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import', 'helixsensor'] %}
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
{% set ca_server = grains.id %}
include:
- ca
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
{% set ca_server = grains.id %}
{% else %}
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
include:
- ca.dirs
{% set x509dict = salt['mine.get'](manager~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}

View File

@@ -760,9 +760,11 @@ configure_minion() {
case "$minion_type" in
'helix')
cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
echo "master: '$HOSTNAME'" >> "$minion_config"
;;
'manager' | 'eval' | 'managersearch' | 'standalone' | 'import')
cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
printf '%s\n'\
"master: '$HOSTNAME'"\
"mysql.host: '$MAINIP'"\
@@ -1031,6 +1033,8 @@ copy_minion_tmp_files() {
} >> "$setup_log" 2>&1
;;
esac
echo "Syncing all salt modules." >> "$setup_log" 2>&1
salt-call saltutil.sync_modules >> "$setup_log" 2>&1
}
copy_ssh_key() {
@@ -1143,7 +1147,7 @@ installer_prereq_packages() {
logCmd "systemctl start NetworkManager"
elif [ "$OS" == ubuntu ]; then
# Print message to stdout so the user knows setup is doing something
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
# Install network manager so we can do interface stuff
if ! command -v nmcli > /dev/null 2>&1; then
retry 50 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1
@@ -1200,18 +1204,24 @@ docker_install() {
else
case "$install_type" in
'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT')
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
;;
*)
retry 50 10 "apt-key add $temp_install_dir/gpg/docker.pub" >> "$setup_log" 2>&1 || exit 1
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" >> "$setup_log" 2>&1
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
;;
esac
if [ $OSVER == "bionic" ]; then
service docker stop
apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
retry 50 10 "apt-get -y install docker-ce=5:20.10.5~3-0~ubuntu-bionic docker-ce-cli=5:20.10.5~3-0~ubuntu-bionic docker-ce-rootless-extras=5:20.10.5~3-0~ubuntu-bionic python3-docker" >> "$setup_log" 2>&1 || exit 1
apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
elif [ $OSVER == "focal" ]; then
service docker stop
apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
retry 50 10 "apt-get -y install docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal docker-ce-rootless-extras=5:20.10.8~3-0~ubuntu-focal python3-docker" >> "$setup_log" 2>&1 || exit 1
apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
fi
fi
docker_registry
@@ -1429,6 +1439,28 @@ fleet_pillar() {
"" > "$pillar_file"
}
generate_ca() {
{
echo "Building Certificate Authority";
salt-call state.apply ca;
echo "Confirming existence of the CA certificate"
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
} >> "$setup_log" 2>&1
}
generate_ssl() {
{
# if the install type is a manager then we need to wait for the minion to be ready before trying
# to run the ssl state since we need the minion to sign the certs
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion
fi
echo "Applying SSL state";
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
}
generate_passwords(){
# Generate Random Passwords for Things
MYSQLPASS=$(get_random_value)
@@ -2010,6 +2042,9 @@ reserve_ports() {
reinstall_init() {
info "Putting system in state to run setup again"
# remove all of root's cronjobs
crontab -r -u root
if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
local salt_services=( "salt-master" "salt-minion" )
else
@@ -2049,7 +2084,7 @@ reinstall_init() {
done
# Remove all salt configs
rm -rf /etc/salt/grains /etc/salt/minion /etc/salt/pki/*
rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/
if command -v docker &> /dev/null; then
# Stop and remove all so-* containers so files can be changed with more safety
@@ -2064,6 +2099,12 @@ reinstall_init() {
# Backup /opt/so since we'll be rebuilding this directory during setup
backup_dir /opt/so "$date_string"
# We need to restore these files during a reinstall so python3-influxdb state doesn't try to patch again
restore_file "/opt/so_old_$date_string/state/influxdb_continuous_query.py.patched" "/opt/so/state/"
restore_file "/opt/so_old_$date_string/state/influxdb_retention_policy.py.patched" "/opt/so/state/"
restore_file "/opt/so_old_$date_string/state/influxdbmod.py.patched" "/opt/so/state/"
# If the elastic license has been accepted restore the state file
restore_file "/opt/so_old_$date_string/state/yeselastic.txt" "/opt/so/state/"
# Backup directories in /nsm to prevent app errors
backup_dir /nsm/mysql "$date_string"
@@ -2097,6 +2138,16 @@ reset_proxy() {
fi
}
restore_file() {
src=$1
dst=$2
if [ -f "$src" ]; then
[ ! -d "$dst" ] && mkdir -v -p "$dst"
echo "Restoring $src to $dst." >> "$setup_log" 2>&1
cp -v "$src" "$dst"
fi
}
backup_dir() {
dir=$1
backup_suffix=$2
@@ -2233,7 +2284,7 @@ saltify() {
# Add repo
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
@@ -2253,7 +2304,7 @@ saltify() {
;;
esac
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules'
retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
@@ -2261,107 +2312,6 @@ saltify() {
fi
}
salt_checkin() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage
{
echo "Building Certificate Authority";
salt-call state.apply ca;
echo " *** Restarting Salt to fix any SSL errors. ***";
local SALT_SERVICES=(\
"salt-master" \
"salt-minion"
)
local count=0
for service in "${SALT_SERVICES[@]}"; do
{
echo "Restarting service $service"
systemctl restart "$service" &
local pid=$!
} >> "$setup_log" 2>&1
count=0
while ! (check_service_status "$service"); do
# On final loop, kill the pid trying to restart service and try to manually kill then start it
if [ $count -eq 12 ]; then
{
kill -9 "$pid"
systemctl kill "$service"
systemctl start "$service" &
local pid=$!
} >> "$setup_log" 2>&1
fi
if [ $count -gt 12 ]; then
echo "$service could not be restarted in 120 seconds, exiting" >> "$setup_log" 2>&1
kill -9 "$pid"
exit 1
fi
sleep 10;
((count++))
done
done
count=1
timeout=60
while ! (check_salt_master_status $timeout); do
echo "salt minion cannot talk to salt master after $timeout seconds" >> "$setup_log" 2>&1
if [ $count -gt 2 ]; then
echo "salt minion could not talk to salt master after $count attempts, exiting" >> "$setup_log" 2>&1
exit 1
fi
sleep 1;
((count++))
((timeout+=30)) # add 30s to the timeout each attempt
done
count=1
timeout=60
while ! (check_salt_minion_status $timeout) ; do
echo "salt master did not get a job response from salt minion after $timeout seconds" >> "$setup_log" 2>&1
if [ $count -gt 2 ]; then
echo "salt master did not get a job response from salt minion after $count attempts, exiting" >> "$setup_log" 2>&1
exit 1
fi
systemctl kill salt-minion
systemctl start salt-minion
sleep 1;
((count++))
((timeout+=30)) # add 30s to the timeout each attempt
done
echo " Confirming existence of the CA certificate"
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
echo " Applyng a mine hack";
salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt;
salt "$MINION_ID" mine.update;
echo "Confirming salt mine now contains the certificate";
salt "$MINION_ID" mine.get '*' x509.get_pem_entries | grep -E 'BEGIN CERTIFICATE|END CERTIFICATE';
if [ $? -eq 0 ]; then
echo "CA in mine"
else
echo "CA not in mine"
fi
echo " Applying SSL state";
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
;;
*)
{
#salt-call state.apply ca;
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
;;
esac
{
#salt-call state.apply ca;
salt-call state.apply ssl;
salt-call saltutil.sync_modules;
} >> "$setup_log" 2>&1
}
# Run a salt command to generate the minion key
salt_firstcheckin() {
salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
@@ -2868,6 +2818,10 @@ wait_for_file() {
return 1
}
wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || exit 1
}
# Enable Zeek Logs
zeek_logs_enabled() {
echo "Enabling Zeek Logs" >> "$setup_log" 2>&1

View File

@@ -761,8 +761,13 @@ echo "1" > /root/accept_changes
salt-call state.apply -l info salt.minion >> $setup_log 2>&1
fi
set_progress_str 23 'Generating CA and checking in'
salt_checkin >> $setup_log 2>&1
if [[ $is_manager || $is_helix || $is_import ]]; then
set_progress_str 23 'Generating CA'
generate_ca >> $setup_log 2>&1
fi
set_progress_str 24 'Generating SSL'
generate_ssl >> $setup_log 2>&1
if [[ $is_manager || $is_helix || $is_import ]]; then
set_progress_str 25 'Configuring firewall'