Merge branch 'dev' into kilo

Jason Ertel
2022-01-07 13:41:35 -05:00
15 changed files with 564 additions and 185 deletions

salt/ca/dirs.sls (new file, 4 additions)
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
  file.directory:
    - name: /etc/pki/issued_certs
    - makedirs: True
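Because this state only creates a directory, it can be exercised on its own; a minimal sketch, assuming it is run on a minion that can reach the manager:

# Apply just the new ca.dirs state and confirm the directory exists:
salt-call state.apply ca.dirs
ls -ld /etc/pki/issued_certs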

View File

@@ -1,3 +1,6 @@
+mine_functions:
+  x509.get_pem_entries: [/etc/pki/ca.crt]

x509_signing_policies:
  filebeat:
    - minions: '*'

View File

@@ -1,17 +1,14 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
+include:
+  - ca.dirs
{% set manager = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
  file.managed:
    - source: salt://ca/files/signing_policies.conf
-/etc/pki:
-  file.directory: []
-/etc/pki/issued_certs:
-  file.directory: []
pki_private_key:
  x509.private_key_managed:
    - name: /etc/pki/ca.key
@@ -42,7 +39,7 @@ pki_public_ca_crt:
    - backup: True
    - replace: False
    - require:
-      - file: /etc/pki
+      - sls: ca.dirs
    - timeout: 30
    - retry:
        attempts: 5

View File

@@ -108,7 +108,7 @@ CANCURL=$(curl -sI https://securityonionsolutions.com/ | grep "200 OK")
while [[ $CURLCONTINUE != "yes" ]] && [[ $CURLCONTINUE != "no" ]]; do
  if [[ "$FIRSTPASS" == "yes" ]]; then
    echo "We could not access https://securityonionsolutions.com/."
-    echo "Since packages are downloaded from the internet, internet acceess is required."
+    echo "Since packages are downloaded from the internet, internet access is required."
    echo "If you would like to ignore this warning and continue anyway, please type 'yes'."
    echo "Otherwise, type 'no' to exit."
    FIRSTPASS=no

View File

@@ -294,32 +294,49 @@ require_manager() {
}
retry() {
  maxAttempts=$1
  sleepDelay=$2
  cmd=$3
  expectedOutput=$4
+  failedOutput=$5
  attempt=0
  local exitcode=0
  while [[ $attempt -lt $maxAttempts ]]; do
    attempt=$((attempt+1))
    echo "Executing command with retry support: $cmd"
    output=$(eval "$cmd")
    exitcode=$?
    echo "Results: $output ($exitcode)"
    if [ -n "$expectedOutput" ]; then
      if [[ "$output" =~ "$expectedOutput" ]]; then
-        return $exitCode
+        return $exitcode
      else
-        echo "Expected '$expectedOutput' but got '$output'"
+        echo "Did not find expectedOutput: '$expectedOutput' in the output below from running the command: '$cmd'"
+        echo "<Start of output>"
+        echo "$output"
+        echo "<End of output>"
      fi
+    elif [ -n "$failedOutput" ]; then
+      if [[ "$output" =~ "$failedOutput" ]]; then
+        echo "Found failedOutput: '$failedOutput' in the output below from running the command: '$cmd'"
+        echo "<Start of output>"
+        echo "$output"
+        echo "<End of output>"
+        if [[ $exitcode -eq 0 ]]; then
+          echo "The exitcode was 0, but we are setting to 1 since we found $failedOutput in the output."
+          exitcode=1
+        fi
+      else
+        return $exitcode
+      fi
    elif [[ $exitcode -eq 0 ]]; then
-      return $exitCode
+      return $exitcode
    fi
    echo "Command failed with exit code $exitcode; will retry in $sleepDelay seconds ($attempt / $maxAttempts)..."
    sleep $sleepDelay
  done
  echo "Command continues to fail; giving up."
  return $exitcode
}
run_check_net_err() {
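For reference, the new optional fourth (expectedOutput) and fifth (failedOutput) arguments are exercised by call sites elsewhere in this commit; a minimal usage sketch based on those calls:

# Pass an empty expectedOutput and a failedOutput of "Err:" so that apt output containing "Err:" forces a retry:
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
# With neither argument, the command's own exit code (here, grep) decides success, as in wait_for_salt_minion:
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" || exit 1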

View File

@@ -399,6 +399,7 @@ preupgrade_changes() {
  [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
  [[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
  [[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
+  [[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
  true
}
@@ -410,6 +411,7 @@ postupgrade_changes() {
  [[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
  [[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
  [[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
+  [[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
  true
}
@@ -459,11 +461,12 @@ post_to_2.3.90() {
    fi
  fi
  POSTVERSION=2.3.90
}

+post_to_2.3.100() {
+  echo "Post Processing for .100"
+}
up_to_2.3.20(){
  DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
@@ -615,6 +618,9 @@ up_to_2.3.90() {
    fi
  done
+  # There was a bug in 2.3.0 so-firewall addhostgroup that was resolved in 2.3.1 - commit 32294eb2ed30ac74b15bb4bfab687084a928daf2
+  echo "Verify so-firewall is up to date"
+  verify_latest_so-firewall_script
  # Create Endgame Hostgroup
  echo "Adding endgame hostgroup with so-firewall"
  if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then
@@ -657,6 +663,14 @@ up_to_2.3.90() {
  INSTALLEDVERSION=2.3.90
}

+up_to_2.3.100() {
+  echo "Updating Security Onion to 2.3.100"
+  echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+  set +e
+  salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"'
+  set -e
+  fix_wazuh
+}
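One way to spot-check the result afterwards (a hedged sketch; mine.get is the same interface the old salt_checkin code, removed further down, uses to inspect the mine):

# Each minion should now publish the IP addresses of its management interface to the salt mine:
salt '*' mine.get '*' network.ip_addrs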
verify_upgradespace() {
  CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
@@ -853,22 +867,45 @@ verify_latest_update_script() {
  fi
}
verify_latest_so-firewall_script() {
# Check to see if the so-firewall script matches. If not run the new one.
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-firewall | awk '{print $1}')
if [[ "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the so-firewall script is up to date. Proceeding."
else
echo "You are not running the latest version of so-firewall. Updating so-firewall."
cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall /usr/sbin/
echo ""
echo "so-firewall has been updated."
fi
}
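The same check can be performed by hand before running soup; an illustrative one-liner using the paths referenced above:

# If the two checksums differ, soup will copy the newer so-firewall into place:
md5sum /usr/sbin/so-firewall "$UPDATE_DIR/salt/common/tools/sbin/so-firewall"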
apply_hotfix() {
  if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
-    FILE="/nsm/wazuh/etc/ossec.conf"
-    echo "Detecting if ossec.conf needs corrected..."
-    if head -1 $FILE | grep -q "xml version"; then
-      echo "$FILE has an XML header; removing"
-      sed -i 1d $FILE
-      so-wazuh-restart
-    else
-      echo "$FILE does not have an XML header, so no changes are necessary."
-    fi
+    fix_wazuh
  else
    echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
  fi
}
fix_wazuh() {
FILE="/nsm/wazuh/etc/ossec.conf"
echo "Detecting if $FILE needs corrected..."
if [ -f "$FILE" ]; then
if head -1 $FILE | grep -q "xml version"; then
echo "$FILE has an XML header; removing"
sed -i 1d $FILE
so-wazuh-restart
else
echo "$FILE does not have an XML header, so no changes are necessary."
fi
else
echo "$FILE does not exist, so no changes are necessary."
fi
}
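For context, the condition matches an XML declaration on line 1 of ossec.conf (for example <?xml version="1.0"?>; illustrative, the exact text may vary); a quick manual spot-check:

# If this prints an xml declaration, fix_wazuh deletes that line with 'sed -i 1d' and restarts Wazuh:
head -1 /nsm/wazuh/etc/ossec.conf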
main() {
  trap 'check_err $?' EXIT
@@ -1178,8 +1215,16 @@ Please review the following for more information about the update process and re
https://docs.securityonion.net/soup
https://blog.securityonion.net
-Press Enter to continue or Ctrl-C to cancel.
+EOF
+if [ -n "$BRANCH" ]; then
+cat << EOF
+SOUP will use the $BRANCH branch.
+EOF
+fi
+cat << EOF
+Press Enter to continue or Ctrl-C to cancel.
EOF
read -r input

View File

@@ -56,6 +56,12 @@ elasticsearch:
  query:
    bool:
      max_clause_count: 1500
+  id_field_data:
+    enabled: false
+logger:
+  org:
+    elasticsearch:
+      deprecation: ERROR

View File

@@ -32,7 +32,7 @@
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "data.win.system", "target_field": "winlog", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata", "target_field": "winlog.event_data", "ignore_missing": true } },
{ "rename": { "field": "data", "target_field": "wazuh.data", "ignore_missing": true } },
{ "rename": { "field": "winlog.eventID", "target_field": "winlog.event_id", "ignore_missing": true } },
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },

View File

@@ -330,13 +330,15 @@ so-elasticsearch-pipelines-file:
        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

so-elasticsearch-pipelines:
  cmd.run:
    - name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines {{ grains.host }}
    - onchanges:
      - file: esingestconf
      - file: esingestdynamicconf
      - file: esyml
      - file: so-elasticsearch-pipelines-file
+    - require:
+      - docker_container: so-elasticsearch

{% if TEMPLATES %}
so-elasticsearch-templates:
@@ -344,6 +346,8 @@ so-elasticsearch-templates:
    - name: /usr/sbin/so-elasticsearch-templates-load
    - cwd: /opt/so
    - template: jinja
+    - require:
+      - docker_container: so-elasticsearch
{% endif %}

so-elasticsearch-roles-load:
@@ -351,6 +355,8 @@ so-elasticsearch-roles-load:
    - name: /usr/sbin/so-elasticsearch-roles-load
    - cwd: /opt/so
    - template: jinja
+    - require:
+      - docker_container: so-elasticsearch
{% endif %} {# if grains['role'] != 'so-helix' #}

View File

@@ -165,10 +165,57 @@
}
}
},
-"agent":{
-"type":"object",
-"dynamic": true
-},
"agent": {
"type":"object",
"dynamic": true,
"properties": {
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"version": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"as":{ "as":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true
@@ -225,17 +272,164 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"ecs":{ "ecs": {
"type":"object", "properties": {
"dynamic": true "version": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"error":{
"type":"object",
"dynamic": true
},
-"event":{
-"type":"object",
-"dynamic": true
"event": {
"properties": {
"action": {
"ignore_above": 1024,
"type": "keyword"
},
"category": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"code": {
"ignore_above": 1024,
"type": "keyword"
},
"created": {
"type": "date",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"dataset": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"duration": {
"type": "long"
},
"end": {
"type": "date"
},
"hash": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ingested": {
"type": "date",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"kind": {
"ignore_above": 1024,
"type": "keyword"
},
"module": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"original": {
"doc_values": false,
"ignore_above": 1024,
"index": false,
"type": "keyword"
},
"outcome": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"reference": {
"ignore_above": 1024,
"type": "keyword"
},
"risk_score": {
"type": "float"
},
"risk_score_norm": {
"type": "float"
},
"sequence": {
"type": "long"
},
"severity": {
"type": "long"
},
"severity_label": {
"ignore_above": 1024,
"type": "keyword".
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"start": {
"type": "date"
},
"timezone": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"url": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event_data":{
"type":"object",
@@ -267,11 +461,97 @@
},
"host":{
"type":"object",
-"dynamic": true
"dynamic": true,
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"http":{
"type":"object",
-"dynamic": true
"dynamic": true,
"properties": {
"request": {
"properties": {
"body": {
"properties": {
"bytes": {
"type": "long"
},
"content": {
"fields": {
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"bytes": {
"type": "long"
},
"method": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"referrer": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"response": {
"properties": {
"body": {
"properties": {
"bytes": {
"type": "long"
},
"content": {
"fields": {
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"bytes": {
"type": "long"
},
"status_code": {
"type": "long"
}
}
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"import":{
"type":"object",
@@ -318,7 +598,18 @@
},
"log":{
"type":"object",
-"dynamic": true
"dynamic": true,
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"logscan": {
"type": "object",
@@ -436,7 +727,27 @@
},
"service":{
"type":"object",
-"dynamic": true
"dynamic": true,
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"sip":{
"type":"object",
@@ -462,9 +773,20 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"source":{ "source":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true,
"properties" : {
"address": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"ssh":{
"type":"object",
@@ -478,11 +800,12 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"tags":{ "tags": {
"type":"text", "ignore_above": 1024,
"fields":{ "type": "keyword",
"keyword":{ "fields": {
"type":"keyword" "keyword": {
"type": "keyword"
} }
} }
}, },
@@ -508,7 +831,22 @@
},
"user_agent":{
"type":"object",
-"dynamic": true
"dynamic": true,
"properties": {
"original": {
"fields": {
"keyword": {
"type": "keyword"
},
"text": {
"norms": false,
"type": "text"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"version":{
"type":"object",

File diff suppressed because one or more lines are too long

View File

@@ -219,6 +219,8 @@ path.logs: /var/log/logstash
# path.plugins: []
{% set pipeline_workers = salt['pillar.get']('logstash_settings:ls_pipeline_workers', '1') %}
{% set pipeline_batch = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', '125') %}
+{% set pipeline_ecs_compatibility = salt['pillar.get']('logstash_settings:ls_ecs_compatibility', 'disabled') %}
pipeline.workers: {{ pipeline_workers }}
pipeline.batch.size: {{ pipeline_batch }}
+pipeline.ecs_compatibility: {{ pipeline_ecs_compatibility }}

View File

@@ -16,12 +16,14 @@
{% endif %}
{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import', 'helixsensor'] %}
+{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
+{% set ca_server = grains.id %}
include:
  - ca
-{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
-{% set ca_server = grains.id %}
{% else %}
-{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
+include:
+  - ca.dirs
+{% set x509dict = salt['mine.get'](manager~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}

View File

@@ -760,9 +760,11 @@ configure_minion() {
  case "$minion_type" in
    'helix')
+      cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
      echo "master: '$HOSTNAME'" >> "$minion_config"
      ;;
    'manager' | 'eval' | 'managersearch' | 'standalone' | 'import')
+      cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
      printf '%s\n'\
        "master: '$HOSTNAME'"\
        "mysql.host: '$MAINIP'"\
@@ -1031,6 +1033,8 @@ copy_minion_tmp_files() {
      } >> "$setup_log" 2>&1
      ;;
  esac
+  echo "Syncing all salt modules." >> "$setup_log" 2>&1
+  salt-call saltutil.sync_modules >> "$setup_log" 2>&1
}
copy_ssh_key() {
@@ -1143,7 +1147,7 @@ installer_prereq_packages() {
    logCmd "systemctl start NetworkManager"
  elif [ "$OS" == ubuntu ]; then
    # Print message to stdout so the user knows setup is doing something
-    retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
+    retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
    # Install network manager so we can do interface stuff
    if ! command -v nmcli > /dev/null 2>&1; then
      retry 50 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1
@@ -1200,18 +1204,24 @@ docker_install() {
  else
    case "$install_type" in
      'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT')
-        retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
+        retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
        ;;
      *)
        retry 50 10 "apt-key add $temp_install_dir/gpg/docker.pub" >> "$setup_log" 2>&1 || exit 1
        add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" >> "$setup_log" 2>&1
-        retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
+        retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
        ;;
    esac
    if [ $OSVER == "bionic" ]; then
+      service docker stop
+      apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
      retry 50 10 "apt-get -y install docker-ce=5:20.10.5~3-0~ubuntu-bionic docker-ce-cli=5:20.10.5~3-0~ubuntu-bionic docker-ce-rootless-extras=5:20.10.5~3-0~ubuntu-bionic python3-docker" >> "$setup_log" 2>&1 || exit 1
+      apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
    elif [ $OSVER == "focal" ]; then
+      service docker stop
+      apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
      retry 50 10 "apt-get -y install docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal docker-ce-rootless-extras=5:20.10.8~3-0~ubuntu-focal python3-docker" >> "$setup_log" 2>&1 || exit 1
+      apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
    fi
  fi
  docker_registry
@@ -1429,6 +1439,28 @@ fleet_pillar() {
    "" > "$pillar_file"
}
generate_ca() {
{
echo "Building Certificate Authority";
salt-call state.apply ca;
echo "Confirming existence of the CA certificate"
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
} >> "$setup_log" 2>&1
}
generate_ssl() {
{
# if the install type is a manager then we need to wait for the minion to be ready before trying
# to run the ssl state since we need the minion to sign the certs
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion
fi
echo "Applying SSL state";
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
}
generate_passwords(){
  # Generate Random Passwords for Things
  MYSQLPASS=$(get_random_value)
@@ -2010,6 +2042,9 @@ reserve_ports() {
reinstall_init() {
  info "Putting system in state to run setup again"
+  # remove all of root's cronjobs
+  crontab -r -u root
  if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
    local salt_services=( "salt-master" "salt-minion" )
  else
@@ -2049,7 +2084,7 @@ reinstall_init() {
  done
  # Remove all salt configs
-  rm -rf /etc/salt/grains /etc/salt/minion /etc/salt/pki/*
+  rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/
  if command -v docker &> /dev/null; then
    # Stop and remove all so-* containers so files can be changed with more safety
@@ -2064,6 +2099,12 @@ reinstall_init() {
  # Backup /opt/so since we'll be rebuilding this directory during setup
  backup_dir /opt/so "$date_string"
+  # We need to restore these files during a reinstall so python3-influxdb state doesn't try to patch again
+  restore_file "/opt/so_old_$date_string/state/influxdb_continuous_query.py.patched" "/opt/so/state/"
+  restore_file "/opt/so_old_$date_string/state/influxdb_retention_policy.py.patched" "/opt/so/state/"
+  restore_file "/opt/so_old_$date_string/state/influxdbmod.py.patched" "/opt/so/state/"
+  # If the elastic license has been accepted restore the state file
+  restore_file "/opt/so_old_$date_string/state/yeselastic.txt" "/opt/so/state/"
  # Backup directories in /nsm to prevent app errors
  backup_dir /nsm/mysql "$date_string"
@@ -2097,6 +2138,16 @@ reset_proxy() {
  fi
}
restore_file() {
src=$1
dst=$2
if [ -f "$src" ]; then
[ ! -d "$dst" ] && mkdir -v -p "$dst"
echo "Restoring $src to $dst." >> "$setup_log" 2>&1
cp -v "$src" "$dst"
fi
}
backup_dir() {
  dir=$1
  backup_suffix=$2
@@ -2233,7 +2284,7 @@ saltify() {
  # Add repo
  echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
-  retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
+  retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
  set_progress_str 6 'Installing various dependencies'
  retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
  set_progress_str 7 'Installing salt-master'
@@ -2253,7 +2304,7 @@ saltify() {
    ;;
  esac
-  retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
+  retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
  set_progress_str 8 'Installing salt-minion & python modules'
  retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
  retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
@@ -2261,107 +2312,6 @@ saltify() {
  fi
}
salt_checkin() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage
{
echo "Building Certificate Authority";
salt-call state.apply ca;
echo " *** Restarting Salt to fix any SSL errors. ***";
local SALT_SERVICES=(\
"salt-master" \
"salt-minion"
)
local count=0
for service in "${SALT_SERVICES[@]}"; do
{
echo "Restarting service $service"
systemctl restart "$service" &
local pid=$!
} >> "$setup_log" 2>&1
count=0
while ! (check_service_status "$service"); do
# On final loop, kill the pid trying to restart service and try to manually kill then start it
if [ $count -eq 12 ]; then
{
kill -9 "$pid"
systemctl kill "$service"
systemctl start "$service" &
local pid=$!
} >> "$setup_log" 2>&1
fi
if [ $count -gt 12 ]; then
echo "$service could not be restarted in 120 seconds, exiting" >> "$setup_log" 2>&1
kill -9 "$pid"
exit 1
fi
sleep 10;
((count++))
done
done
count=1
timeout=60
while ! (check_salt_master_status $timeout); do
echo "salt minion cannot talk to salt master after $timeout seconds" >> "$setup_log" 2>&1
if [ $count -gt 2 ]; then
echo "salt minion could not talk to salt master after $count attempts, exiting" >> "$setup_log" 2>&1
exit 1
fi
sleep 1;
((count++))
((timeout+=30)) # add 30s to the timeout each attempt
done
count=1
timeout=60
while ! (check_salt_minion_status $timeout) ; do
echo "salt master did not get a job response from salt minion after $timeout seconds" >> "$setup_log" 2>&1
if [ $count -gt 2 ]; then
echo "salt master did not get a job response from salt minion after $count attempts, exiting" >> "$setup_log" 2>&1
exit 1
fi
systemctl kill salt-minion
systemctl start salt-minion
sleep 1;
((count++))
((timeout+=30)) # add 30s to the timeout each attempt
done
echo " Confirming existence of the CA certificate"
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
echo " Applyng a mine hack";
salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt;
salt "$MINION_ID" mine.update;
echo "Confirming salt mine now contains the certificate";
salt "$MINION_ID" mine.get '*' x509.get_pem_entries | grep -E 'BEGIN CERTIFICATE|END CERTIFICATE';
if [ $? -eq 0 ]; then
echo "CA in mine"
else
echo "CA not in mine"
fi
echo " Applying SSL state";
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
;;
*)
{
#salt-call state.apply ca;
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
;;
esac
{
#salt-call state.apply ca;
salt-call state.apply ssl;
salt-call saltutil.sync_modules;
} >> "$setup_log" 2>&1
}
# Run a salt command to generate the minion key
salt_firstcheckin() {
  salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput
@@ -2868,6 +2818,10 @@ wait_for_file() {
  return 1
}
wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || exit 1
}
# Enable Zeek Logs
zeek_logs_enabled() {
  echo "Enabling Zeek Logs" >> "$setup_log" 2>&1

View File

@@ -761,8 +761,13 @@ echo "1" > /root/accept_changes
  salt-call state.apply -l info salt.minion >> $setup_log 2>&1
fi

-set_progress_str 23 'Generating CA and checking in'
-salt_checkin >> $setup_log 2>&1
+if [[ $is_manager || $is_helix || $is_import ]]; then
+  set_progress_str 23 'Generating CA'
+  generate_ca >> $setup_log 2>&1
+fi
+set_progress_str 24 'Generating SSL'
+generate_ssl >> $setup_log 2>&1

if [[ $is_manager || $is_helix || $is_import ]]; then
  set_progress_str 25 'Configuring firewall'