Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 17:22:49 +01:00

Compare commits: reyesj2-pa ... 2.4.190-20 (1 commit)

Commit: 33ada95bbc
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored)
@@ -32,7 +32,6 @@ body:
         - 2.4.170
         - 2.4.180
         - 2.4.190
-        - 2.4.200
         - Other (please provide detail below)
     validations:
       required: true
@@ -220,22 +220,12 @@ compare_es_versions() {
 }

 copy_new_files() {
-  # Define files to exclude from deletion (relative to their respective base directories)
-  local EXCLUDE_FILES=(
-    "salt/hypervisor/soc_hypervisor.yaml"
-  )
-
-  # Build rsync exclude arguments
-  local EXCLUDE_ARGS=()
-  for file in "${EXCLUDE_FILES[@]}"; do
-    EXCLUDE_ARGS+=(--exclude="$file")
-  done
-
   # Copy new files over to the salt dir
   cd $UPDATE_DIR
-  rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
-  rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
+  rsync -a salt $DEFAULT_SALT_DIR/ --delete
+  rsync -a pillar $DEFAULT_SALT_DIR/ --delete
   chown -R socore:socore $DEFAULT_SALT_DIR/
+  chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
   cd /tmp
 }

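With the exclude arguments gone, --delete now removes anything under the destination that is absent from the source tree, including the previously protected hypervisor file. A minimal sketch of the behavior the removed code provided (per rsync's documented flag semantics):

    # --exclude both skips the path during the copy and shields a matching
    # destination file from --delete (unless --delete-excluded is given):
    rsync -a --delete --exclude="salt/hypervisor/soc_hypervisor.yaml" salt "$DEFAULT_SALT_DIR"/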
@@ -32,16 +32,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
     - retry:
         attempts: 4
         interval: 30
-
-{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
-so-elastic-fleet-auto-configure-logstash-outputs-force:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
-    - retry:
-        attempts: 4
-        interval: 30
-    - onchanges:
-      - x509: etc_elasticfleet_logstash_crt
 {% endif %}

 # If enabled, automatically update Fleet Server URLs & ES Connection
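This removed state was the caller of the --force/--certs flags whose parsing is dropped from so-elastic-fleet-outputs-update later in this compare. The invocation it triggered on certificate changes:

    # Formerly run whenever etc_elasticfleet_logstash_crt changed:
    /usr/sbin/so-elastic-fleet-outputs-update --force --certs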
@@ -8,28 +8,20 @@
 {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
 {% if not AGENT_STATUS %}

-pull_agent_installer:
-  file.managed:
-    - name: /opt/so/so-elastic-agent_linux_amd64
-    - source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
-    - mode: 755
-    - makedirs: True
-
 {% if grains.role not in ['so-heavynode'] %}
 run_installer:
-  cmd.run:
-    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENGENERAL }}
+  cmd.script:
+    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
     - cwd: /opt/so
+    - args: -token={{ GRIDNODETOKENGENERAL }}
     - retry: True
 {% else %}
 run_installer:
-  cmd.run:
-    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKENHEAVY }}
+  cmd.script:
+    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
     - cwd: /opt/so
+    - args: -token={{ GRIDNODETOKENHEAVY }}
     - retry: True
 {% endif %}

-cleanup_agent_installer:
-  file.absent:
-    - name: /opt/so/so-elastic-agent_linux_amd64
 {% endif %}
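Switching from cmd.run to cmd.script lets Salt fetch the installer straight from the fileserver, execute it, and discard the cached copy, which is presumably why the separate pull_agent_installer/cleanup_agent_installer states are no longer needed. A rough command-line equivalent (enrollment token elided):

    # cmd.script caches a salt:// source locally, runs it with the given
    # args and working directory, then removes the cached script:
    salt-call cmd.script \
        salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64 \
        args='-token=<enrollment-token>' cwd=/opt/so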
@@ -8,27 +8,6 @@

 . /usr/sbin/so-common

-FORCE_UPDATE=false
-UPDATE_CERTS=false
-
-while [[ $# -gt 0 ]]; do
-  case $1 in
-    -f|--force)
-      FORCE_UPDATE=true
-      shift
-      ;;
-    -c|--certs)
-      UPDATE_CERTS=true
-      shift
-      ;;
-    *)
-      echo "Unknown option $1"
-      echo "Usage: $0 [-f|--force] [-c|--certs]"
-      exit 1
-      ;;
-  esac
-done
-
 # Only run on Managers
 if ! is_manager_node; then
   printf "Not a Manager Node... Exiting"
@@ -38,42 +17,17 @@ fi
 function update_logstash_outputs() {
   if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
     SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
-    LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
-    LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
-    LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
     if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
-      if [[ "$UPDATE_CERTS" != "true" ]]; then
-        # Reuse existing secret
-        JSON_STRING=$(jq -n \
-          --arg UPDATEDLIST "$NEW_LIST_JSON" \
-          --argjson SECRETS "$SECRETS" \
-          --argjson SSL_CONFIG "$SSL_CONFIG" \
-          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
-      else
-        # Update certs, creating new secret
-        JSON_STRING=$(jq -n \
-          --arg UPDATEDLIST "$NEW_LIST_JSON" \
-          --arg LOGSTASHKEY "$LOGSTASHKEY" \
-          --arg LOGSTASHCRT "$LOGSTASHCRT" \
-          --arg LOGSTASHCA "$LOGSTASHCA" \
-          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
-      fi
+      JSON_STRING=$(jq -n \
+        --arg UPDATEDLIST "$NEW_LIST_JSON" \
+        --argjson SECRETS "$SECRETS" \
+        --argjson SSL_CONFIG "$SSL_CONFIG" \
+        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
     else
-      if [[ "$UPDATE_CERTS" != "true" ]]; then
-        # Reuse existing ssl config
-        JSON_STRING=$(jq -n \
-          --arg UPDATEDLIST "$NEW_LIST_JSON" \
-          --argjson SSL_CONFIG "$SSL_CONFIG" \
-          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
-      else
-        # Update ssl config
-        JSON_STRING=$(jq -n \
-          --arg UPDATEDLIST "$NEW_LIST_JSON" \
-          --arg LOGSTASHKEY "$LOGSTASHKEY" \
-          --arg LOGSTASHCRT "$LOGSTASHCRT" \
-          --arg LOGSTASHCA "$LOGSTASHCA" \
-          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
-      fi
+      JSON_STRING=$(jq -n \
+        --arg UPDATEDLIST "$NEW_LIST_JSON" \
+        --argjson SSL_CONFIG "$SSL_CONFIG" \
+        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
     fi
   fi

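The surviving branches hinge on the difference between jq's two binding flags. A small illustration with placeholder values:

    # --arg binds a plain string; --argjson parses its value as JSON, so
    # $ssl is embedded as an object rather than a quoted string:
    jq -n --arg hosts '["manager:5055"]' \
          --argjson ssl '{"verification_mode":"full"}' \
          '{"hosts": $hosts, "ssl": $ssl}'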
@@ -197,7 +151,7 @@ NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "$
 NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

 # Compare the current & new list of outputs - if different, update the Logstash outputs
-if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
+if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
   printf "\nHashes match - no update needed.\n"
   printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

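For reference, the jq idiom in the hunk header collects positional arguments into a JSON array, which is what gets hashed above. For example:

    jq --compact-output --null-input '$ARGS.positional' --args -- host1:5055 host2:5055
    # → ["host1:5055","host2:5055"]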
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -31,19 +31,6 @@ libvirt_conf_dir:
     - group: 939
     - makedirs: True

-libvirt_volumes:
-  file.directory:
-    - name: /nsm/libvirt/volumes
-    - user: qemu
-    - group: qemu
-    - dir_mode: 755
-    - file_mode: 640
-    - recurse:
-      - user
-      - group
-      - mode
-    - makedirs: True
-
 libvirt_config:
   file.managed:
     - name: /opt/so/conf/libvirt/libvirtd.conf
@@ -21,8 +21,6 @@ whiptail_title='Security Onion UPdater'
 NOTIFYCUSTOMELASTICCONFIG=false
 TOPFILE=/opt/so/saltstack/default/salt/top.sls
 BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
-SALTUPGRADED=false
-SALT_CLOUD_INSTALLED=false
 # used to display messages to the user at the end of soup
 declare -a FINAL_MESSAGE_QUEUE=()

@@ -1262,39 +1260,24 @@ upgrade_check_salt() {
 }

 upgrade_salt() {
+  SALTUPGRADED=True
   echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
   echo ""
   # If rhel family
   if [[ $is_rpm ]]; then
-    # Check if salt-cloud is installed
-    if rpm -q salt-cloud &>/dev/null; then
-      SALT_CLOUD_INSTALLED=true
-    fi
-
     echo "Removing yum versionlock for Salt."
     echo ""
     yum versionlock delete "salt"
     yum versionlock delete "salt-minion"
     yum versionlock delete "salt-master"
-    # Remove salt-cloud versionlock if installed
-    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
-      yum versionlock delete "salt-cloud"
-    fi
     echo "Updating Salt packages."
     echo ""
     set +e
     # if oracle run with -r to ignore repos set by bootstrap
     if [[ $OS == 'oracle' ]]; then
-      # Add -L flag only if salt-cloud is already installed
-      if [[ $SALT_CLOUD_INSTALLED == true ]]; then
-        run_check_net_err \
-        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
-        "Could not update salt, please check $SOUP_LOG for details."
-      else
-        run_check_net_err \
-        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
-        "Could not update salt, please check $SOUP_LOG for details."
-      fi
+      run_check_net_err \
+      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
+      "Could not update salt, please check $SOUP_LOG for details."
     # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
     else
       run_check_net_err \
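The dropped -L flag is what asked the bootstrap script to install salt-cloud alongside the other packages. A hedged reading of the remaining flags, based on bootstrap-salt.sh's documented options (verify against the vendored script):

    # -X  do not start daemons after install
    # -r  skip the script's own repo configuration (Oracle path only)
    # -F  allow overwriting existing files
    # -M  also install salt-master
    sh bootstrap-salt.sh -X -r -F -M stable "3006.9"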
@@ -1307,10 +1290,6 @@ upgrade_salt() {
     yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
     yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
     yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
-    # Add salt-cloud versionlock if installed
-    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
-      yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
-    fi
   # Else do Ubuntu things
   elif [[ $is_deb ]]; then
     echo "Removing apt hold for Salt."
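For context, the versionlock plugin pins packages at an exact epoch:version glob so a routine yum update cannot move them; soup deletes the lock, upgrades, then re-adds it at the new version. Illustrative usage:

    yum versionlock add "salt-minion-0:3006.9-0.*"   # pin epoch 0, version 3006.9
    yum versionlock list                             # show current pins
    yum versionlock delete "salt-minion"             # release before upgrading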
@@ -1343,7 +1322,6 @@ upgrade_salt() {
     echo ""
     exit 1
   else
-    SALTUPGRADED=true
     echo "Salt upgrade success."
     echo ""
   fi
@@ -1587,11 +1565,6 @@ main() {
   # ensure the mine is updated and populated before highstates run, following the salt-master restart
   update_salt_mine

-  if [[ $SALT_CLOUD_INSTALLED == true && $SALTUPGRADED == true ]]; then
-    echo "Updating salt-cloud config to use the new Salt version"
-    salt-call state.apply salt.cloud.config concurrent=True
-  fi
-
   enable_highstate

   echo ""
@@ -14,7 +14,7 @@ sool9_{{host}}:
   private_key: /etc/ssh/auth_keys/soqemussh/id_ecdsa
   sudo: True
   deploy_command: sh /tmp/.saltcloud-*/deploy.sh
-  script_args: -r -F -x python3 stable {{ SALTVERSION }}
+  script_args: -r -F -x python3 stable 3006.9
   minion:
     master: {{ grains.host }}
     master_port: 4506
@@ -13,7 +13,6 @@
 {% if '.'.join(sls.split('.')[:2]) in allowed_states %}
 {% if 'vrt' in salt['pillar.get']('features', []) %}
 {% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
-{% from 'salt/map.jinja' import SALTVERSION %}

 {% if HYPERVISORS %}
 cloud_providers:
@@ -21,7 +20,7 @@ cloud_providers:
     - name: /etc/salt/cloud.providers.d/libvirt.conf
     - source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
     - defaults:
-        HYPERVISORS: {{ HYPERVISORS }}
+        HYPERVISORS: {{HYPERVISORS}}
     - template: jinja
     - makedirs: True

@@ -30,10 +29,9 @@ cloud_profiles:
     - name: /etc/salt/cloud.profiles.d/socloud.conf
     - source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
     - defaults:
-        HYPERVISORS: {{ HYPERVISORS }}
+        HYPERVISORS: {{HYPERVISORS}}
         MANAGERHOSTNAME: {{ grains.host }}
         MANAGERIP: {{ pillar.host.mainip }}
-        SALTVERSION: {{ SALTVERSION }}
     - template: jinja
     - makedirs: True
 {% endif %}
@@ -1,4 +1,4 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   master:
-    version: '3006.16'
+    version: '3006.9'
@@ -1,5 +1,5 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   minion:
-    version: '3006.16'
+    version: '3006.9'
   check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
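The comment at the top of both pillar files explains the constraint: soup discovers the pinned Salt version by grepping, so "version" may appear only once per file. A hypothetical sketch of such a lookup (the real path and pattern live in soup itself):

    # A second "version:" key in this file would break the match:
    grep "version:" pillar/salt/minion.sls | awk -F"'" '{print $2}'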
@@ -337,5 +337,4 @@
   ]
   data_format = "influx"
   interval = "1h"
-  timeout = "120s"
 {%- endif %}