Merge branch 'escluster' into newescluster
@@ -18,7 +18,7 @@ def mysql_conn(retry):
        return False

    mainint = __salt__['pillar.get']('host:mainint')
-    ip_arr = __salt__['grains.get']('ip_interfaces').get(mainint)
+    ip_arr = __salt__['grains.get']('ip4_interfaces').get(mainint)

    mysql_up = False

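Note: the `ip_interfaces` grain returns every address (IPv4 and IPv6) bound to each interface, while `ip4_interfaces` returns only IPv4 addresses, so the lookup above now yields a clean list of IPv4 addresses for the management interface. A quick way to compare the two grains on a node (assuming salt-minion is installed; --local avoids needing the master):

    salt-call --local grains.item ip_interfaces ip4_interfaces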
@@ -111,7 +111,7 @@ heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-2
-      - docker-ce: 5:19.03.12~3-0~ubuntu-bionic
+      - docker-ce: 5:19.03.14~3-0~ubuntu-bionic
     - hold: True
     - update_holds: True

@@ -147,7 +147,7 @@ heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-3.2.el7
-      - docker-ce: 3:19.03.12-3.el7
+      - docker-ce: 3:19.03.14-3.el7
     - hold: True
     - update_holds: True
 {% endif %}
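Both heldpackages states pin Docker to the new 19.03.14 build: `pkg.installed` with `hold: True` and `update_holds: True` installs the exact version and re-applies the package hold so routine updates cannot move it. To confirm the pin on a running node you could check the hold list yourself (commands shown for illustration only):

    apt-mark showhold        # Ubuntu: should list docker-ce and containerd.io
    yum versionlock list     # CentOS: should list the locked docker-ce/containerd.io versions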
salt/common/tools/sbin/so-elastic-restart (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-restart elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-restart kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-restart logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-restart filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-restart curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-restart elastalert $1
+{%- endif %}
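so-elastic-restart (and the start/stop siblings below) are Jinja-templated wrappers: each block is emitted only when the minion's `role` grain matches, so the installed script contains plain so-restart calls for exactly the components that run on that node. As a rough sketch, on a standalone node the rendered file would reduce to something like this (illustrative only, not the literal rendered output):

    #!/bin/bash
    . /usr/sbin/so-common
    /usr/sbin/so-restart elasticsearch $1
    /usr/sbin/so-restart kibana $1
    /usr/sbin/so-restart logstash $1
    /usr/sbin/so-restart filebeat $1
    /usr/sbin/so-restart curator $1
    /usr/sbin/so-restart elastalert $1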
salt/common/tools/sbin/so-elastic-start (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-start elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-start kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-start logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-start filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-start curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-start elastalert $1
+{%- endif %}
salt/common/tools/sbin/so-elastic-stop (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-stop elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-stop kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-stop logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-stop filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-stop curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-stop elastalert $1
+{%- endif %}
@@ -22,5 +22,5 @@ salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
 /usr/sbin/so-soctopus-restart

 echo "Importing Plays - this will take some time...."
-wait 5
+sleep 5
 /usr/sbin/so-playbook-ruleupdate
salt/common/tools/sbin/so-salt-minion-check (Normal file → Executable file)
salt/common/tools/sbin/so-suricata-testrule (new file, 63 lines)
@@ -0,0 +1,63 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+{%- set MANAGER = salt['grains.get']('master') %}
+{%- set VERSION = salt['pillar.get']('global:soversion') %}
+{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
+
+TESTRULE=$1
+TESTPCAP=$2
+
+. /usr/sbin/so-common
+
+echo ""
+echo "==============="
+echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
+echo ""
+sleep 3
+
+cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
+cat $TESTRULE >> /tmp/nids-testing/rules/all.rules
+
+rm -rf /tmp/nids-testing/output
+mkdir -p /tmp/nids-testing/output
+chown suricata:socore /tmp/nids-testing/output
+mkdir -p /tmp/nids-testing/rules
+
+
+echo "==== Begin Suricata Output ==="
+
+docker run --rm \
+  -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
+  -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
+  -v /tmp/nids-testing/rules:/etc/suricata/rules:ro \
+  -v "$TESTPCAP:/input.pcap:ro" \
+  -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
+  -v /tmp/nids-testing/output/:/nsm/:rw \
+  {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
+  --runmode single -v -k none -r /input.pcap -l /tmp --init-errors-fatal
+echo "==== End Suricata Output ==="
+
+echo ""
+echo "If any alerts hit, they will be displayed below:"
+echo ""
+
+cat /tmp/nids-testing/output/* | jq
+
+echo ""
+echo "End so-suricata-testrule"
+echo "==============="
+echo ""
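so-suricata-testrule takes a rules file and a pcap as its two positional arguments, appends the rules to a scratch copy of all.rules, and replays the pcap through the so-suricata container so any alerts can be inspected with jq. A typical invocation might look like the following (paths are purely illustrative):

    sudo so-suricata-testrule /opt/so/rules/nids/local.rules /tmp/test-traffic.pcap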
@@ -61,3 +61,4 @@ discovery.seed_hosts:
 - {{ grains.master }}
 {%- endif %}
 {%- endif %}
+indices.query.bool.max_clause_count: 1500
salt/playbook/files/playbook_db_migrations.sql (new file, 1762 lines)
File diff suppressed because one or more lines are too long
@@ -38,7 +38,7 @@ query_playbookdbuser_grants:
 query_updatwebhooks:
   mysql_query.run:
     - database: playbook
-    - query: "update webhooks set url = 'http://{{MANAGERIP}}:7000/playbook/webhook' where project_id = 1"
+    - query: "update webhooks set url = 'http://{{MANAGERIP}}:7000/playbook/webhook' where project_id in (1,2)"
     - connection_host: {{ MAINIP }}
     - connection_port: 3306
     - connection_user: root
@@ -1,12 +1,16 @@
 {% set URLBASE = salt['pillar.get']('global:url_base') -%}
+{% set DESCRIPTION = salt['pillar.get']('sensoroni:node_description') -%}
+{% set ADDRESS = salt['pillar.get']('sensoroni:node_address') -%}
 {% set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
-{% set CHECKININTERVALMS = salt['pillar.get']('sensoroni:sensor_checkin_interval_ms', 10000) -%}
+{% set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) -%}
 {% set STENOENABLED = salt['pillar.get']('steno:enabled', False) -%}
 {
   "logFilename": "/opt/sensoroni/logs/sensoroni.log",
   "logLevel":"info",
   "agent": {
     "role": "{{ grains.role }}",
+    "description": "{{ DESCRIPTION }}",
+    "address": "{{ ADDRESS }}",
     "pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
     "serverUrl": "https://{{ URLBASE }}/sensoroniagents",
     "verifyCert": false,
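The sensoroni pillar keys move from a sensor_ prefix to node_ naming (node_address, node_description, node_checkin_interval_ms), matching the new values written by manager_global() later in this diff. After setup you could spot-check what this template will see with something like (illustrative):

    salt-call pillar.get sensoroni:node_checkin_interval_ms
    salt-call pillar.get sensoroni:node_description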
@@ -43,6 +43,10 @@
     "password": "",
     "verifyCert": false
   },
+  "sostatus": {
+    "refreshIntervalMs": 30000,
+    "offlineThresholdMs": 60000
+  },
 {% if THEHIVEKEY != '' %}
   "thehive": {
     "hostUrl": "http://{{ MANAGERIP }}:9000/thehive",
@@ -251,19 +251,19 @@ check_pass_match() {
   fi
 }

+# False if stopped, true if running
 check_service_status() {

   local service_name=$1
   echo "Checking service $service_name status" >> "$setup_log" 2>&1
   systemctl status $service_name > /dev/null 2>&1
   local status=$?
-  #true if there is an issue with the service false if it is running properly
   if [ $status -gt 0 ]; then
     echo " $service_name is not running" >> "$setup_log" 2>&1
-    echo 1;
+    return 1;
   else
     echo " $service_name is running" >> "$setup_log" 2>&1
-    echo 0;
+    return 0;
   fi

 }
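Switching these helpers from echo 0/1 to return 0/1 means callers can test the function's exit status directly instead of capturing stdout, which is what the reworked loops later in this diff rely on. A minimal sketch of the new calling pattern, assuming check_service_status from so-functions is already sourced (service name is just an example):

    # Wait for a unit to stop; check_service_status returns 0 while it is still running
    while check_service_status "salt-minion"; do
      sleep 5
    done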
@@ -273,28 +273,27 @@ check_salt_master_status() {
   salt-call saltutil.kill_all_jobs > /dev/null 2>&1
   salt-call state.show_top > /dev/null 2>&1
   local status=$?
-  #true if there is an issue talking to salt master
   if [ $status -gt 0 ]; then
-    echo 1;
+    echo " Could not talk to salt master" >> "$setup_log" 2>&1
+    return 1;
   else
     echo " Can talk to salt master" >> "$setup_log" 2>&1
-    echo 0;
+    return 0;
   fi

 }

 check_salt_minion_status() {
   echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
-  salt "$MINION_ID" test.ping >> "$setup_log" 2>&1
+  salt "$MINION_ID" test.ping > /dev/null 2>&1
   local status=$?
-  #true if there is an issue getting a job response from the minion
   if [ $status -gt 0 ]; then
-    echo 1;
+    echo " Minion did not respond" >> "$setup_log" 2>&1
+    return 1;
   else
     echo " Received job response from salt minion" >> "$setup_log" 2>&1
-    echo 0;
+    return 0;
   fi

 }

 check_soremote_pass() {
@@ -767,12 +766,12 @@ detect_os() {

 disable_auto_start() {

-  if crontab -l -u $INSTALLUSERNAME 2>&1 | grep so-setup > /dev/null 2>&1; then
+  if crontab -l -u $INSTALLUSERNAME 2>&1 | grep -q so-setup; then
     # Remove the automated setup script from crontab, if it exists
     logCmd "crontab -u $INSTALLUSERNAME -r"
   fi

-  if grep so-setup /home/$INSTALLUSERNAME/.bash_profile > /dev/null 2>&1; then
+  if grep -s -q so-setup /home/$INSTALLUSERNAME/.bash_profile; then
     # Truncate last line of the bash profile
     info "Removing auto-run of setup from bash profile"
     sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1
@@ -822,9 +821,9 @@ docker_install() {
     yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
   fi
   if [[ ! $is_iso ]]; then
-    yum -y install docker-ce-19.03.12-3.el7 containerd.io-1.2.13-3.2.el7;
+    yum -y install docker-ce-19.03.14-3.el7 containerd.io-1.2.13-3.2.el7;
   fi
-  yum versionlock docker-ce-19.03.12-3.el7;
+  yum versionlock docker-ce-19.03.14-3.el7;
   yum versionlock containerd.io-1.2.13-3.2.el7
 } >> "$setup_log" 2>&1

@@ -1104,10 +1103,10 @@ manager_pillar() {
 manager_global() {
   local global_pillar="$local_salt_dir/pillar/global.sls"

-  if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then
-    SENSOR_CHECKIN_INTERVAL_MS=10000
+  if [ -z "$NODE_CHECKIN_INTERVAL_MS" ]; then
+    NODE_CHECKIN_INTERVAL_MS=10000
     if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then
-      SENSOR_CHECKIN_INTERVAL_MS=1000
+      NODE_CHECKIN_INTERVAL_MS=1000
     fi
   fi

@@ -1167,7 +1166,9 @@ manager_global() {
   "  imagerepo: '$IMAGEREPO'"\
   "  pipeline: 'redis'"\
   "sensoroni:"\
-  "  sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\
+  "  node_address: '$MAINIP'"\
+  "  node_description: '$NODE_DESCRIPTION'"\
+  "  node_checkin_interval_ms: $NODE_CHECKIN_INTERVAL_MS"\
   "strelka:"\
   "  enabled: $STRELKA"\
   "  rules: 1"\
@@ -1398,12 +1399,46 @@ reserve_group_ids() {
 reinstall_init() {
   info "Putting system in state to run setup again"

+  if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
+    local salt_services=( "salt-master" "salt-minion" )
+  else
+    local salt_services=( "salt-minion" )
+  fi
+
+  local service_retry_count=20
+
   {
-    # Kill any salt processes
-    pkill -9 -ef /usr/bin/salt
+    if command -v salt-call &> /dev/null; then
+      # Disable schedule so highstate doesn't start running during the install
+      salt-call -l info schedule.disable
+
+      # Kill any currently running salt jobs, also to prevent issues with highstate.
+      salt-call -l info saltutil.kill_all_jobs
+    fi
+
+    # Kill any salt processes (safely)
+    for service in "${salt_services[@]}"; do
+      # Stop the service in the background so we can exit after a certain amount of time
+      systemctl stop "$service" &
+      local pid=$!
+
+      local count=0
+      while check_service_status "$service"; do
+        if [[ $count -gt $service_retry_count ]]; then
+          echo "Could not stop $service after 1 minute, exiting setup."
+
+          # Stop the systemctl process trying to kill the service, show user a message, then exit setup
+          kill -9 $pid
+          kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+        fi
+
+        sleep 5
+        ((count++))
+      done
+    done
+
     # Remove all salt configs
-    rm -rf /etc/salt/global /etc/salt/minion /etc/salt/master /etc/salt/pki/*
+    rm -rf /etc/salt/grains /etc/salt/minion /etc/salt/pki/*

     if command -v docker &> /dev/null; then
       # Stop and remove all so-* containers so files can be changed with more safety
@@ -1424,7 +1459,7 @@ reinstall_init() {
     # Remove the old launcher package in case the config changes
     remove_package launcher-final

-  } >> $setup_log 2>&1
+  } >> "$setup_log" 2>&1
 }

 backup_dir() {
@@ -1621,61 +1656,59 @@ salt_checkin() {
     "salt-master" \
     "salt-minion"
   )
-  local LOOP_COUNT=0
-  for service in "${SALT_SERVICES[@]}"; do
-    echo "Stopping service $service" >> "$setup_log" 2>&1
-    systemctl stop "$service" >> "$setup_log" 2>&1
-    LOOP_COUNT=0
-    while ! (( $(check_service_status $service) )); do
-      echo "$service still running" >> "$setup_log" 2>&1
-      if [ $LOOP_COUNT -gt 60 ]; then
-        echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1
-        exit 1
-      fi
-      sleep 1;
-      ((LOOP_COUNT+=1))
-    done
-  done
-
-  sleep 5;
-
+  local count=0
   for service in "${SALT_SERVICES[@]}"; do
-    echo "Starting service $service" >> "$setup_log" 2>&1
-    systemctl start "$service" >> "$setup_log" 2>&1
-    LOOP_COUNT=0
-    while (( $(check_service_status $service) )); do
-      echo "$service still not running" >> "$setup_log" 2>&1
-      if [ $LOOP_COUNT -gt 60 ]; then
-        echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1
-        exit 1
+    {
+      echo "Restarting service $service"
+      systemctl restart "$service" &
+      local pid=$!
+    } >> "$setup_log" 2>&1
+
+    count=0
+    while ! (check_service_status "$service"); do
+      # On final loop, kill the pid trying to restart service and try to manually kill then start it
+      if [ $count -eq 12 ]; then
+        {
+          kill -9 "$pid"
+          systemctl kill "$service"
+          systemctl start "$service" &
+          local pid=$!
+        } >> "$setup_log" 2>&1
       fi
-      sleep 1;
-      ((LOOP_COUNT+=1))
+
+      if [ $count -gt 12 ]; then
+        echo "$service could not be restarted in 120 seconds, exiting" >> "$setup_log" 2>&1
+        kill -9 "$pid"
+        kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+      fi
+      sleep 10;
+      ((count++))
     done
   done

-  sleep 5;
-
-  LOOP_COUNT=0
-  while (( $(check_salt_master_status) )); do
+  count=0
+  while ! (check_salt_master_status); do
     echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1
-    if [ $LOOP_COUNT -gt 30 ]; then
+    if [ $count -gt 30 ]; then
       echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1
-      exit 1
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
     fi
     sleep 1;
-    ((LOOP_COUNT+=1))
+    ((count++))
   done

-  LOOP_COUNT=0
-  while (( $(check_salt_minion_status) )); do
+  count=0
+  while ! (check_salt_minion_status); do
     echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1
-    if [ $LOOP_COUNT -gt 30 ]; then
+    if [ $count -gt 30 ]; then
       echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1
-      exit 1
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
     fi
+    systemctl kill salt-minion
+    systemctl start salt-minion
     sleep 1;
-    ((LOOP_COUNT+=1))
+    ((count++))
   done

   echo " Confirming existence of the CA certificate"
@@ -433,31 +433,57 @@ whiptail_make_changes
 # From here on changes will be made.
 echo "1" > /root/accept_changes

-if [[ $is_reinstall ]]; then
-  reinstall_init
-fi
+# Set up handler for setup to exit early (use `kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1` in child scripts)
+trap 'catch $LINENO' SIGUSR1

+catch() {
+  info "Fatal error occurred at $1 in so-setup, failing setup."
+  whiptail_setup_failed
+  exit
+}
+
+# Init networking so rest of install works
 if [[ -n "$TURBO" ]]; then
   use_turbo_proxy
 fi

 if [[ "$setup_type" == 'iso' ]]; then
-  # Init networking so rest of install works
-  set_hostname
+  set_hostname >> $setup_log 2>&1
+
   set_management_interface
 fi

 disable_ipv6
-disable_auto_start

 if [[ "$setup_type" != 'iso' ]]; then
-  set_hostname
+  set_hostname >> $setup_log 2>&1
 fi

 if [[ $is_minion ]]; then
   add_mngr_ip_to_hosts
 fi

+# This block sets REDIRECTIT which is used by a function outside the below subshell
+{
+  set_main_ip;
+  set_redirect;
+} >> $setup_log 2>&1
+
+# Begin install
+{
+  # Set initial percentage to 0
+  export percentage=0
+
+  # Show initial progress message
+  set_progress_str 0 'Running initial configuration steps'
+
+  set_path
+
+  if [[ $is_reinstall ]]; then
+    reinstall_init
+  fi
+
+  disable_auto_start
+
 {
   mark_version;
   clear_manager;
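The trap/SIGUSR1 pair added here is what lets the reworked helpers in so-functions (reinstall_init, salt_checkin) abort the whole installer from a logged child: on a fatal error the child signals the process that set the trap, whose handler shows the failure dialog and exits. Reduced to its essentials the pattern looks like this (a minimal sketch, not the full so-setup flow; child-step.sh is hypothetical):

    #!/bin/bash
    # Parent script: install the handler before launching any steps
    catch() { echo "Fatal error occurred at line $1, failing setup."; exit 1; }
    trap 'catch $LINENO' SIGUSR1

    ./child-step.sh   # hypothetical child script launched by the parent

    # Inside child-step.sh, on a fatal error:
    #   kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
    # ($$ is the child's PID, so -oppid= resolves to the parent that set the trap)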
@@ -476,11 +502,6 @@ if [[ $is_manager && ! $is_eval ]]; then
   add_soremote_user_manager >> $setup_log 2>&1
 fi

-{
-  set_main_ip;
-  set_redirect;
-} >> $setup_log 2>&1
-
 host_pillar >> $setup_log 2>&1

 if [[ $is_minion || $is_import ]]; then
@@ -488,12 +509,6 @@ if [[ $is_minion || $is_import ]]; then
   [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
 fi

-# Begin install
-{
-  # Set initial percentage to 0
-  export percentage=0
-  set_path
-
 if [[ $is_manager && $is_airgap ]]; then
   info "Creating airgap repo"
   create_repo >> $setup_log 2>&1
@@ -588,7 +603,7 @@ fi

 if [[ $is_minion ]]; then
   set_progress_str 22 'Checking if the Salt Minion needs to be updated'
-  salt-call state.apply salt.minion -l info >> $setup_log 2>&1
+  salt-call state.apply -l info salt.minion >> $setup_log 2>&1
 fi

 set_progress_str 23 'Generating CA and checking in'