Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-07 17:52:46 +01:00)

Commit: Merge branch 'dev' into kilo
@@ -1,18 +1,18 @@
-### 2.3.110-20220309 ISO image built on 2022/03/09
+### 2.3.110-20220407 ISO image built on 2022/04/07

### Download and Verify

-2.3.110-20220309 ISO image:
+2.3.110-20220407 ISO image:

-https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220309.iso
+https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220407.iso

-MD5: 537564F8B56633E2D46E5E7C4E2BF18A
+MD5: 928D589709731EFE9942CA134A6F4C6B
-SHA1: 1E1B42EDB711AC8B5963B3460056770B91AE6BFC
+SHA1: CA588A684586CC0D5BDE5E0E41C935FFB939B6C7
-SHA256: 4D73E5BE578DA43DCFD3C1B5F9AF07A7980D8DF90ACDDFEF6CEA177F872EECA0
+SHA256: CBF8743838AF2C7323E629FB6B28D5DD00AE6658B0E29E4D0916411D2D526BD2

Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220309.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220407.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS

@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma

Download the signature file for the ISO:
```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220309.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220407.iso.sig
```

Download the ISO image:
```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220309.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220407.iso
```

Verify the downloaded ISO image using the signature file:
```
-gpg --verify securityonion-2.3.110-20220309.iso.sig securityonion-2.3.110-20220309.iso
+gpg --verify securityonion-2.3.110-20220407.iso.sig securityonion-2.3.110-20220407.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
-gpg: Signature made Wed 09 Mar 2022 10:20:47 AM EST using RSA key ID FE507013
+gpg: Signature made Thu 07 Apr 2022 03:30:03 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
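Putting the verification steps above together, a quick end-to-end check of the new image might look like the following. This is a minimal sketch assuming the 20220407 artifacts have already been downloaded; the expected SHA256 is the value published above, and sha256sum prints lowercase hex, so compare case-insensitively.

```
# Import the Security Onion signing key referenced above
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
gpg --import KEYS

# Verify the detached signature against the ISO
gpg --verify securityonion-2.3.110-20220407.iso.sig securityonion-2.3.110-20220407.iso

# Cross-check the published SHA256 (compare case-insensitively)
sha256sum securityonion-2.3.110-20220407.iso
# Expected: CBF8743838AF2C7323E629FB6B28D5DD00AE6658B0E29E4D0916411D2D526BD2
```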
@@ -93,8 +93,7 @@ check_err() {
fi
set +e
systemctl_func "start" "$cron_service_name"
-echo "Ensuring highstate is enabled."
-salt-call state.enable highstate --local
+enable_highstate
exit $exit_code
fi

@@ -366,6 +365,12 @@ clone_to_tmp() {
fi
}

+enable_highstate() {
+echo "Enabling highstate."
+salt-call state.enable highstate -l info --local
+echo ""
+}
+
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)

@@ -492,10 +497,10 @@ stop_salt_master() {
set +e
echo ""
echo "Killing all Salt jobs across the grid."
-salt \* saltutil.kill_all_jobs
+salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
echo ""
echo "Killing any queued Salt jobs on the manager."
-pkill -9 -ef "/usr/bin/python3 /bin/salt"
+pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e

echo ""

@@ -857,7 +862,7 @@ upgrade_salt() {
echo ""
set +e
run_check_net_err \
-"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
+"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."

@@ -866,11 +871,27 @@ upgrade_salt() {
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi

+echo "Checking if Salt was upgraded."
+echo ""
+# Check that Salt was upgraded
+SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
+if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
+echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
+echo "Once the issue is resolved, run soup again."
+echo "Exiting."
+echo ""
+exit 0
+else
+echo "Salt upgrade success."
+echo ""
+fi
+
}

update_repo() {
-echo "Performing repo changes."
if [[ "$OS" == "centos" ]]; then
+echo "Performing repo changes."
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
@@ -890,6 +911,21 @@ update_repo() {
yum clean all
yum repolist
fi
+elif [[ "$OS" == "ubuntu" ]]; then
+ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
+
+if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
+OSVER=bionic
+elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
+OSVER=focal
+else
+echo "We do not support your current version of Ubuntu."
+exit 1
+fi
+
+rm -f /etc/apt/sources.list.d/salt.list
+echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt $OSVER main" > /etc/apt/sources.list.d/saltstack.list
+apt-get update
fi
}
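For reference, on a focal manager the Ubuntu branch added to update_repo() above would leave a repo definition along these lines; the 20.04 value is only an illustration of what $ubuntu_version resolves to from /etc/os-release, not output captured from this commit.

```
# /etc/apt/sources.list.d/saltstack.list (illustrative rendering for Ubuntu 20.04 "focal")
deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt focal main
```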
@@ -922,6 +958,8 @@ verify_latest_update_script() {
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
fix_wazuh
+elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
+2_3_10_hotfix_1
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi

@@ -943,6 +981,28 @@ fix_wazuh() {
fi
}

+#upgrade salt to 3004.1
+2_3_10_hotfix_1() {
+systemctl_func "stop" "$cron_service_name"
+# update mine items prior to stopping salt-minion and salt-master
+update_salt_mine
+stop_salt_minion
+stop_salt_master
+update_repo
+# Does salt need upgraded. If so update it.
+if [[ $UPGRADESALT -eq 1 ]]; then
+echo "Upgrading Salt"
+# Update the repo files so it can actually upgrade
+upgrade_salt
+fi
+rm -f /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdbmod.py.patched /opt/so/state/influxdb_retention_policy.py.patched
+systemctl_func "start" "salt-master"
+salt-call state.apply salt.python3-influxdb -l info
+systemctl_func "start" "salt-minion"
+systemctl_func "start" "$cron_service_name"
+
+}

main() {
trap 'check_err $?' EXIT

@@ -1012,12 +1072,19 @@ main() {
upgrade_check_salt
set -e

+if [[ $is_airgap -eq 0 ]]; then
+update_centos_repo
+yum clean all
+check_os_updates
+fi
+
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION hotfix"
copy_new_files
apply_hotfix
echo "Hotfix applied"
update_version
+enable_highstate
salt-call state.highstate -l info queue=True
else
echo ""

@@ -1032,9 +1099,6 @@ main() {
echo "Updating dockers to $NEWVERSION."
if [[ $is_airgap -eq 0 ]]; then
airgap_update_dockers
-update_centos_repo
-yum clean all
-check_os_updates
# if not airgap but -f was used
elif [[ ! -z "$ISOLOC" ]]; then
airgap_update_dockers

@@ -1057,21 +1121,6 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
-
-echo "Checking if Salt was upgraded."
-echo ""
-# Check that Salt was upgraded
-SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
-if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
-echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
-echo "Once the issue is resolved, run soup again."
-echo "Exiting."
-echo ""
-exit 0
-else
-echo "Salt upgrade success."
-echo ""
-fi
fi

preupgrade_changes

@@ -1127,9 +1176,7 @@ main() {
echo ""
fi

-echo "Enabling highstate."
-salt-call state.enable highstate -l info --local
-echo ""
+enable_highstate

echo ""
echo "Running a highstate. This could take several minutes."
@@ -19,11 +19,37 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
+{% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %}

include:
  - idh.openssh.config
+  - firewall

-# IDH State
+# If True, block IDH Services from accepting connections on Managment IP
+{% if RESTRICTIDHSERVICES %}
+{% from 'idh/opencanary_config.map.jinja' import OPENCANARYCONFIG %}
+{% set idh_services = salt['pillar.get']('idh:services', []) %}
+
+{% for service in idh_services %}
+{% if service in ["smnp","ntp", "tftp"] %}
+{% set proto = 'udp' %}
+{% else %}
+{% set proto = 'tcp' %}
+{% endif %}
+block_mgt_ip_idh_services_{{ proto }}_{{ OPENCANARYCONFIG[service~'.port'] }} :
+  iptables.insert:
+    - table: filter
+    - chain: INPUT
+    - jump: DROP
+    - position: 1
+    - proto: {{ proto }}
+    - dport: {{ OPENCANARYCONFIG[service~'.port'] }}
+    - destination: {{ MAINIP }}
+{% endfor %}
+{% endif %}

# Create a config directory
temp:
@@ -1,4 +1,4 @@
-{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log') %}
+{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log', shell='/bin/bash') %}

influxdb:
  retention_policies:
salt/repo/client/centos.sls (new file, 98 lines)
@@ -0,0 +1,98 @@
{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
{% from 'repo/client/map.jinja' import REPOPATH with context %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% set managerupdates = salt['pillar.get']('global:managerupdate', 0) %}
{% set role = grains.id.split('_') | last %}

# from airgap state
{% if ISAIRGAP and grains.os == 'CentOS' %}
{% set MANAGER = salt['grains.get']('master') %}
airgapyum:
  file.managed:
    - name: /etc/yum/yum.conf
    - source: salt://repo/client/files/centos/airgap/yum.conf

airgap_repo:
  pkgrepo.managed:
    - humanname: Airgap Repo
    - baseurl: https://{{ MANAGER }}/repo
    - gpgcheck: 0
    - sslverify: 0

{% endif %}

# from airgap and common
{% if ABSENTFILES|length > 0%}
{% for file in ABSENTFILES %}
{{ file }}:
  file.absent:
    - name: {{ REPOPATH }}{{ file }}
    - onchanges_in:
      - cmd: cleanyum
{% endfor %}
{% endif %}

# from common state
# Remove default Repos
{% if grains['os'] == 'CentOS' %}
repair_yumdb:
  cmd.run:
    - name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
    - onlyif:
      - 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'

crsynckeys:
  file.recurse:
    - name: /etc/pki/rpm_gpg
    - source: salt://repo/client/files/centos/keys/

{% if not ISAIRGAP %}
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
remove_securityonionrepocache:
  file.absent:
    - name: /etc/yum.repos.d/securityonioncache.repo
{% endif %}

{% if role not in ['eval', 'standalone', 'import', 'manager', 'managersearch'] and managerupdates == 1 %}
remove_securityonionrepo:
  file.absent:
    - name: /etc/yum.repos.d/securityonion.repo
{% endif %}

crsecurityonionrepo:
  file.managed:
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
    - name: /etc/yum.repos.d/securityonion.repo
    - source: salt://repo/client/files/centos/securityonion.repo
{% else %}
    - name: /etc/yum.repos.d/securityonioncache.repo
    - source: salt://repo/client/files/centos/securityonioncache.repo
{% endif %}
    - mode: 644

yumconf:
  file.managed:
    - name: /etc/yum.conf
    - source: salt://repo/client/files/centos/yum.conf.jinja
    - mode: 644
    - template: jinja
    - show_changes: False

cleanairgap:
  file.absent:
    - name: /etc/yum.repos.d/airgap_repo.repo
{% endif %}

cleanyum:
  cmd.run:
    - name: 'yum clean metadata'
    - onchanges:
{% if ISAIRGAP %}
      - file: airgapyum
      - pkgrepo: airgap_repo
{% else %}
      - file: crsecurityonionrepo
      - file: yumconf
{% endif %}

{% endif %}
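To exercise the new CentOS repo state on its own, it can be applied directly with salt-call, mirroring the salt-call usage elsewhere in this commit; the invocation below is an illustrative test run, not a step soup performs.

```
# Apply only the new repo client state on a CentOS minion (illustrative)
salt-call state.apply repo.client.centos -l info
```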
@@ -1,98 +1,2 @@
The 98 lines this state previously carried (the same airgap, repo-cleanup and yum configuration blocks now living in salt/repo/client/centos.sls above, the only notable difference being that the old copy pointed crsynckeys at /etc/pki/rpm-gpg rather than /etc/pki/rpm_gpg) are removed and replaced with a two-line include:

+include:
+  - repo.client.{{grains.os | lower}}
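Because the include above is keyed on `grains.os | lower`, the same top-level state resolves to the matching per-OS file; the rendering below is an illustration of how that Jinja evaluates, not additional committed code.

```
# On CentOS (grains.os == 'CentOS'):
include:
  - repo.client.centos

# On Ubuntu (grains.os == 'Ubuntu'):
include:
  - repo.client.ubuntu
```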
salt/repo/client/ubuntu.sls (new file, 20 lines)
@@ -0,0 +1,20 @@
# this removes the repo file left by bootstrap-salt.sh without -r
remove_salt.list:
  file.absent:
    - name: /etc/apt/sources.list.d/salt.list

saltstack.list:
  file.managed:
    - name: /etc/apt/sources.list.d/saltstack.list
    - contents:
      - deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt/ {{grains.oscodename}} main

apt_update:
  cmd.run:
    - name: apt-get update
    - onchanges:
      - file: saltstack.list
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
@@ -31,7 +31,7 @@
{% if grains.os|lower in ['centos', 'redhat'] %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os|lower == 'ubuntu' %}
-{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
+{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  master:
-    version: 3004
+    version: 3004.1
@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  minion:
-    version: 3004
+    version: 3004.1
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.
@@ -32,6 +32,22 @@ install_salt_minion:
exec 1>&- # close stdout
exec 2>&- # close stderr
nohup /bin/sh -c '{{ UPGRADECOMMAND }}' &
+
+{# if we are the salt master #}
+{% if grains.id.split('_')|first == grains.master %}
+remove_influxdb_continuous_query_state_file:
+  file.absent:
+    - name: /opt/so/state/influxdb_continuous_query.py.patched
+
+remove_influxdbmod_state_file:
+  file.absent:
+    - name: /opt/so/state/influxdbmod.py.patched
+
+remove_influxdb_retention_policy_state_file:
+  file.absent:
+    - name: /opt/so/state/influxdb_retention_policy.py.patched
+{% endif %}
+
{% endif %}

{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
@@ -20,16 +20,15 @@ base:

  '*':
    - cron.running
+    - repo.client

  'not G@saltversion:{{saltversion}}':
    - match: compound
    - salt.minion-state-apply-test
-    - repo.client
    - salt.minion

  'G@os:CentOS and G@saltversion:{{saltversion}}':
    - match: compound
-    - repo.client
    - yum.packages

  '* and G@saltversion:{{saltversion}}':
@@ -49,14 +49,14 @@ airgap_repo() {
rm -rf /etc/yum.repos.d/*
echo "[airgap_repo]" > /etc/yum.repos.d/airgap_repo.repo
if $is_manager; then
echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo
else
echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo
fi
echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo
echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo
echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo
echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo
}

airgap_rules() {

@@ -138,6 +138,21 @@ analyze_system() {
logCmd "ip a"
}

+analyst_workstation_pillar() {
+
+local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
+
+# Create the analyst workstation pillar
+printf '%s\n'\
+"host:"\
+" mainint: '$MNIC'"\
+"workstation:"\
+" gui:"\
+" enabled: true" >> "$pillar_file"\
+"sensoroni:"\
+" node_description: '${NODE_DESCRIPTION//\'/''}'" > $pillar_file
+}
+
calculate_useable_cores() {

# Calculate reasonable core usage

@@ -435,6 +450,13 @@ collect_hostname_validate() {
done
}

+collect_idh_preferences() {
+IDHMGTRESTRICT='False'
+whiptail_idh_preferences
+
+if [[ "$idh_preferences" != "" ]]; then IDHMGTRESTRICT='True'; fi
+}
+
collect_idh_services() {
whiptail_idh_services

@@ -766,6 +788,9 @@ collect_zeek() {

configure_minion() {
local minion_type=$1
+if [[ $is_analyst ]]; then
+minion_type=workstation
+fi
echo "Configuring minion type as $minion_type" >> "$setup_log" 2>&1
echo "role: so-$minion_type" > /etc/salt/grains

@@ -774,6 +799,9 @@ configure_minion() {
echo "id: '$MINION_ID'" > "$minion_config"

case "$minion_type" in
+'workstation')
+echo "master: '$MSRV'" >> "$minion_config"
+;;
'helix')
cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
echo "master: '$HOSTNAME'" >> "$minion_config"

@@ -1108,6 +1136,7 @@ detect_os() {
echo "Detecting Base OS" >> "$log" 2>&1
if [ -f /etc/redhat-release ]; then
OS=centos
+is_centos=true
if grep -q "CentOS Linux release 7" /etc/redhat-release; then
OSVER=7
elif grep -q "CentOS Linux release 8" /etc/redhat-release; then
@@ -1207,7 +1236,7 @@ disable_ipv6() {

docker_install() {

-if [ $OS = 'centos' ]; then
+if [[ $is_centos ]]; then
logCmd "yum clean expire-cache"
if [[ ! $is_iso ]]; then
logCmd "yum -y install docker-ce-20.10.5-3.el7 docker-ce-cli-20.10.5-3.el7 docker-ce-rootless-extras-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7"

@@ -1229,15 +1258,15 @@ docker_install() {
;;
esac
if [ $OSVER == "bionic" ]; then
service docker stop
apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.5~3-0~ubuntu-bionic docker-ce-cli=5:20.10.5~3-0~ubuntu-bionic docker-ce-rootless-extras=5:20.10.5~3-0~ubuntu-bionic python3-docker" >> "$setup_log" 2>&1 || exit 1
apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
elif [ $OSVER == "focal" ]; then
service docker stop
apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal docker-ce-rootless-extras=5:20.10.8~3-0~ubuntu-focal python3-docker" >> "$setup_log" 2>&1 || exit 1
apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
fi
fi
docker_registry

@@ -1368,9 +1397,9 @@ es_heapsize() {
# Set heap size to 33% of available memory
ES_HEAP_SIZE=$(( total_mem / 3 ))
if [ "$ES_HEAP_SIZE" -ge 25001 ] ; then
ES_HEAP_SIZE="25000m"
else
ES_HEAP_SIZE=$ES_HEAP_SIZE"m"
fi
fi
export ES_HEAP_SIZE

@@ -1728,7 +1757,7 @@ manager_global() {
" managerip: '$MAINIP'" > "$global_pillar"

if [[ $HIGHLANDER == 'True' ]]; then
printf '%s\n'\
" highlander: True"\ >> "$global_pillar"
fi
if [[ $is_airgap ]]; then

@@ -1758,14 +1787,15 @@ manager_global() {
" enabled: $STRELKA"\
" rules: 1" >> "$global_pillar"
if [[ $is_airgap ]]; then
printf '%s\n'\
" repos:"\
" - 'https://$HOSTNAME/repo/rules/strelka'" >> "$global_pillar"
else
printf '%s\n'\
" repos:"\
" - 'https://github.com/Neo23x0/signature-base'" >> "$global_pillar"
fi

printf '%s\n'\
"curator:"\
" hot_warm: False"\
@@ -1793,101 +1823,101 @@ manager_global() {
(unchanged context in this mirror: the printf block that writes the default index_settings, each index getting index_template settings with number_of_shards: 1 plus warm: 7, close: 30, delete: 365, for so-beats, so-endgame, so-firewall, so-flow, so-ids, so-import, so-osquery, so-ossec, so-strelka, so-syslog and so-zeek; the only per-index variations are so-import with close: 73000 / delete: 73001 and so-zeek with close: 45)
@@ -2178,7 +2208,7 @@ reset_proxy() {

[[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig

-if [[ $OS == 'centos' ]]; then
+if [[ $is_centos ]]; then
sed -i "/proxy=/d" /etc/yum.conf
else
[[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf

@@ -2206,7 +2236,7 @@ backup_dir() {

remove_package() {
local package_name=$1
-if [ $OS = 'centos' ]; then
+if [[ $is_centos ]]; then
if rpm -qa | grep -q "$package_name"; then
logCmd "yum remove -y $package_name"
fi

@@ -2227,39 +2257,42 @@ remove_package() {
saltify() {

# Install updates and Salt
-if [ $OS = 'centos' ]; then
+if [[ $is_centos ]]; then
set_progress_str 6 'Installing various dependencies'
-if [[ ! $is_iso ]]; then
+if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
logCmd "yum -y install wget nmap-ncat"
fi
+if [[ ! $is_analyst ]]; then
case "$install_type" in
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
reserve_group_ids
if [[ ! $is_iso ]]; then
logCmd "yum -y install sqlite curl mariadb-devel"
fi
# Download Ubuntu Keys in case manager updates = 1
logCmd "mkdir -vp /opt/so/gpg"
if [[ ! $is_airgap ]]; then
-logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3004/SALTSTACK-GPG-KEY.pub"
+logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/SALTSTACK-GPG-KEY.pub"
logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
fi
set_progress_str 7 'Installing salt-master'
if [[ ! $is_iso ]]; then
-logCmd "yum -y install salt-master-3004"
+logCmd "yum -y install salt-master-3004.1"
fi
logCmd "systemctl enable salt-master"
;;
*)
;;
esac
+fi
if [[ ! $is_airgap ]]; then
logCmd "yum clean expire-cache"
fi
set_progress_str 8 'Installing salt-minion & python modules'
-if [[ ! $is_iso ]]; then
+if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
-logCmd "yum -y install salt-minion-3004 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
+logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
logCmd "yum -y update --exclude=salt*"
fi
logCmd "systemctl enable salt-minion"
@@ -2298,8 +2331,8 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')

# Add saltstack repo(s)
-wget -q --inet4-only -O - https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
+wget -q --inet4-only -O - https://repo.securityonion.net/file/securityonion-repo/ubuntu/"$ubuntu_version"/amd64/salt/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
-echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"

# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1

@@ -2307,7 +2340,7 @@ saltify() {

# Get gpg keys
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
-wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/"$ubuntu_version"/amd64/salt/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1

@@ -2320,7 +2353,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
-retry 50 10 "apt-get -y install salt-master=3004+ds-1" >> "$setup_log" 2>&1 || exit 1
+retry 50 10 "apt-get -y install salt-master=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
;;
*)

@@ -2331,14 +2364,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
-echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac

retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules'
-retry 50 10 "apt-get -y install salt-minion=3004+ds-1 salt-common=3004+ds-1" >> "$setup_log" 2>&1 || exit 1
+retry 50 10 "apt-get -y install salt-minion=3004.1+ds-1 salt-common=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1
fi

@@ -2371,7 +2404,7 @@ secrets_pillar(){

securityonion_repo() {
# Remove all the current repos
-if [[ "$OS" == "centos" ]]; then
+if [[ $is_centos ]]; then
if [[ "$INTERWEBS" == "AIRGAP" ]]; then
echo "This is airgap I don't need to add this repo"
else
@@ -2405,8 +2438,28 @@ set_network_dev_status_list() {
}

set_main_ip() {
-MAINIP=$(ip route get 1 | awk '{print $7;exit}')
-MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2)
+local count=0
+local progress='.'
+local c=0
+local m=3.3
+local max_attempts=30
+echo "Gathering the management IP. "
+while ! valid_ip4 "$MAINIP" || ! valid_ip4 "$MNIC_IP"; do
+MAINIP=$(ip route get 1 | awk '{print $7;exit}')
+MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2)
+((count=count+1))
+p=$(awk -vp=$m -vq=$count 'BEGIN{printf "%.0f" ,p * q}')
+printf "%-*s" $((count+1)) '[' | tr ' ' '#'
+printf "%*s%3d%%\r" $((max_attempts-count)) "]" "$p"
+if [ $count = $max_attempts ]; then
+echo "ERROR: Could not determine MAINIP or MNIC_IP." >> "$setup_log" 2>&1
+echo "MAINIP=$MAINIP" >> "$setup_log" 2>&1
+echo "MNIC_IP=$MNIC_IP" >> "$setup_log" 2>&1
+whiptail_error_message "The management IP could not be determined. Please check the log at /root/sosetup.log and verify the network configuration. Press OK to exit."
+exit 1
+fi
+sleep 1
+done
}

# Add /usr/sbin to everyone's path
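For context on the polling loop above, `ip route get 1` reports the route the host would use toward 1.0.0.0, and the seventh field is the source address, which is why the awk expression yields the management IP once the interface has one. The sample output below is illustrative, not captured from a real install.

```
$ ip route get 1
1.0.0.0 via 192.168.1.1 dev eth0 src 192.168.1.50 uid 0
# awk '{print $7;exit}'  ->  192.168.1.50
```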
@@ -2462,7 +2515,7 @@ set_proxy() {
|
|||||||
"}" > /root/.docker/config.json
|
"}" > /root/.docker/config.json
|
||||||
|
|
||||||
# Set proxy for package manager
|
# Set proxy for package manager
|
||||||
if [ "$OS" = 'centos' ]; then
|
if [[ $is_centos ]]; then
|
||||||
echo "proxy=$so_proxy" >> /etc/yum.conf
|
echo "proxy=$so_proxy" >> /etc/yum.conf
|
||||||
else
|
else
|
||||||
# Set it up so the updates roll through the manager
|
# Set it up so the updates roll through the manager
|
||||||
@@ -2633,8 +2686,8 @@ set_initial_firewall_policy() {
|
|||||||
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
|
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
|
||||||
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
|
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
|
||||||
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
|
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
|
||||||
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
|
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
|
||||||
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
|
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
|
||||||
case "$install_type" in
|
case "$install_type" in
|
||||||
'EVAL')
|
'EVAL')
|
||||||
 $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True
@@ -2650,7 +2703,7 @@ set_initial_firewall_policy() {
 'HELIXSENSOR')
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
 $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
 ;;
 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET' | 'IDH' | 'RECEIVER')
 $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
@@ -2690,6 +2743,13 @@ set_initial_firewall_policy() {
 # TODO: implement
 ;;
 esac
+
+# Add some firewall rules for analyst workstations that get added to the grid
+if [[ $is_analyst ]]; then
+$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
+$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost analyst "$MAINIP"
+fi
+
 }
 
 # Set up the management interface on the ISO
@@ -2741,7 +2801,7 @@ set_redirect() {
 
 set_updates() {
 if [ "$MANAGERUPDATES" = '1' ]; then
-if [ "$OS" = 'centos' ]; then
+if [[ $is_centos ]]; then
 if [[ ! $is_airgap ]] && ! ( grep -q "$MSRV" /etc/yum.conf); then
 if grep -q "proxy=" /etc/yum.conf; then
 sed -i "s/proxy=.*/proxy=http:\/\/$MSRV:3142/" /etc/yum.conf
@@ -2808,9 +2868,9 @@ update_sudoers() {
 }
 
 update_packages() {
-if [ "$OS" = 'centos' ]; then
+if [[ $is_centos ]]; then
 logCmd "yum repolist"
 logCmd "yum -y update --exclude=salt*,wazuh*,docker*,containerd*"
 else
 retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
 retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
@@ -2867,10 +2927,11 @@ write_out_idh_services() {
 
 printf '%s\n'\
 "idh:"\
+" restrict_management_ip: $IDHMGTRESTRICT"\
 " services:" >> "$pillar_file"
 for service in ${idh_services[@]}; do
 echo " - $service" | tr '[:upper:]' '[:lower:]' >> "$pillar_file"
 done
 }
 
 # Enable Zeek Logs
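The analyst addition to set_initial_firewall_policy() above opens the manager firewall for a joining workstation by running so-firewall twice over SSH. A rough, hand-run equivalent on the manager itself might look like the sketch below; the workstation IP is illustrative and $default_salt_dir is whatever the setup scripts resolve it to:

```
# Hedged sketch only: mirrors the two new $sshcmd lines from the diff.
WORKSTATION_IP=192.0.2.50   # illustrative address of the analyst workstation
sudo "$default_salt_dir"/salt/common/tools/sbin/so-firewall includehost minion "$WORKSTATION_IP"
sudo "$default_salt_dir"/salt/common/tools/sbin/so-firewall --apply includehost analyst "$WORKSTATION_IP"
```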

setup/so-setup (175 changed lines)
@@ -71,9 +71,17 @@ while [[ $# -gt 0 ]]; do
 done
 
 detect_os
+is_analyst=
+if [ "$setup_type" = 'analyst' ]; then
+is_analyst=true
+# Check to see if this is an ISO
+if [ -d /root/SecurityOnion ]; then
+is_analyst_iso=true
+fi
+fi
 
 if [[ "$setup_type" == 'iso' ]]; then
-if [[ "$OS" == 'centos' ]]; then
+if [[ $is_centos ]]; then
 is_iso=true
 else
 echo "Only use 'so-setup iso' for an ISO install on CentOS. Please run 'so-setup network' instead."
@@ -81,6 +89,31 @@ if [[ "$setup_type" == 'iso' ]]; then
 fi
 fi
 
+# Check to see if this is an analyst install. If it is let's run things differently
+
+if [[ $is_analyst ]]; then
+
+# Make sure it's CentOS
+if [[ ! $is_centos ]]; then
+echo "Analyst Workstation is only supported on CentOS 7"
+exit 1
+fi
+
+if ! whiptail_analyst_install; then
+# Lets make this a standalone
+echo "Enabling graphical interface and setting it to load at boot"
+systemctl set-default graphical.target
+startx
+exit 0
+fi
+
+# If you got this far then you want to join the grid
+is_minion=true
+
+fi
+
+
+
 if ! [ -f $install_opt_file ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then
 exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
 fi
@@ -112,7 +145,6 @@ catch() {
 whiptail_setup_failed
 exit 1
 }
-
 automated=no
 progress() {
 local msg=${1:-'Please wait while installing...'}
@@ -156,11 +188,11 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th
 fi
 
 case "$setup_type" in
-iso | network) # Accepted values
+iso | network | analyst) # Accepted values
 echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
 ;;
 *)
-echo "Invalid install type, must be 'iso' or 'network'" | tee -a $setup_log
+echo "Invalid install type, must be 'iso', 'network' or 'analyst'." | tee -a $setup_log
 exit 1
 ;;
 esac
@@ -202,6 +234,37 @@ if ! [[ -f $install_opt_file ]]; then
 echo "User cancelled setup." | tee -a "$setup_log"
 whiptail_cancel
 fi
+if [[ $is_analyst ]]; then
+collect_hostname
+if [[ $is_analyst_iso ]]; then
+# Prompt Network Setup
+whiptail_management_nic
+whiptail_dhcp_or_static
+
+if [ "$address_type" != 'DHCP' ]; then
+collect_int_ip_mask
+collect_gateway
+collect_dns
+collect_dns_domain
+fi
+
+fi
+if [[ ! $is_analyst_iso ]]; then
+# This should be a network install
+whiptail_network_notice
+whiptail_dhcp_warn
+whiptail_management_nic
+fi
+whiptail_network_init_notice
+network_init
+printf '%s\n' \
+"MNIC=$MNIC" \
+"HOSTNAME=$HOSTNAME" > "$net_init_file"
+set_main_ip
+compare_main_nic_ip
+
+fi
+
 if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then
 whiptail_first_menu_iso
 if [[ $option == "CONFIGURENETWORK" ]]; then
@@ -212,14 +275,16 @@ if ! [[ -f $install_opt_file ]]; then
 printf '%s\n' \
 "MNIC=$MNIC" \
 "HOSTNAME=$HOSTNAME" > "$net_init_file"
-set_main_ip >> $setup_log 2>&1
+set_main_ip
 compare_main_nic_ip
 whiptail_net_setup_complete
 else
 true
 fi
 fi
-whiptail_install_type
+if [[ ! $is_analyst ]]; then
+whiptail_install_type
+fi
 else
 source $install_opt_file
 fi
@@ -269,17 +334,13 @@ elif [ "$install_type" = 'RECEIVER' ]; then
 is_receiver=true
 elif [ "$install_type" = 'ANALYST' ]; then
 cd .. || exit 255
-exec bash so-analyst-install
+exec bash so-setup analyst
 fi
 
 if [[ $is_manager || $is_import ]]; then
 check_elastic_license
 fi
 
-if [[ $is_idh ]]; then
-collect_idh_services
-fi
-
 if ! [[ -f $install_opt_file ]]; then
 if [[ $is_manager && $is_sensor ]]; then
 check_requirements "standalone"
@@ -289,7 +350,7 @@ if ! [[ -f $install_opt_file ]]; then
 check_requirements "dist" "idh"
 elif [[ $is_sensor && ! $is_eval ]]; then
 check_requirements "dist" "sensor"
-elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then
+elif [[ $is_distmanager || $is_minion ]] && [[ ! ( $is_import || $is_analyst ) ]]; then
 check_requirements "dist"
 elif [[ $is_import ]]; then
 check_requirements "import"
@@ -314,26 +375,28 @@ if ! [[ -f $install_opt_file ]]; then
 network_init
 fi
 
-set_main_ip >> $setup_log 2>&1
+set_main_ip
 compare_main_nic_ip
 
 if [[ $is_minion ]]; then
 collect_mngr_hostname
 add_mngr_ip_to_hosts
-fi
-
-if [[ $is_minion ]]; then
 whiptail_ssh_key_copy_notice
 copy_ssh_key >> $setup_log 2>&1
 fi
 
+if [[ $is_idh ]]; then
+collect_idh_services
+collect_idh_preferences
+fi
+
 # Check if this is an airgap install
 if [[ ( $is_manager || $is_import) && $is_iso ]]; then
 whiptail_airgap
 if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
 is_airgap=true
 fi
-elif [[ $is_minion && $is_iso ]]; then
+elif [[ $is_minion && ( $is_iso || $is_analyst ) ]]; then
 $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1
 airgap_check=$?
 [[ $airgap_check == 0 ]] && is_airgap=true >> $setup_log 2>&1
@@ -399,7 +462,12 @@ detect_cloud
 
 short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}')
 
-MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
+if [[ $is_analyst ]]; then
+MINION_ID=$(echo "${short_name}_workstation" | tr '[:upper:]' '[:lower:]')
+fi
+if [[ ! $is_analyst ]]; then
+MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
+fi
 export MINION_ID
 
 echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
@@ -562,7 +630,7 @@ if [[ $is_sensor && ! $is_eval ]]; then
 fi
 fi
 
-[[ $is_iso ]] && collect_ntp_servers
+[[ ( $is_iso || $is_analyst ) ]] && collect_ntp_servers
 
 if [[ ($is_node || $is_receiver) && ! $is_eval ]]; then
 whiptail_node_advanced
@@ -620,7 +688,9 @@ echo "1" > /root/accept_changes
 
 [[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1
 
-reserve_ports
+if [[ ! $is_analyst ]]; then
+reserve_ports
+fi
 
 set_path
 
@@ -650,8 +720,12 @@ echo "1" > /root/accept_changes
 if [[ $is_manager && ! $is_eval ]]; then
 add_soremote_user_manager >> $setup_log 2>&1
 fi
+if [[ ! $is_analyst ]]; then
 host_pillar >> $setup_log 2>&1
+fi
+if [[ $is_analyst ]]; then
+analyst_workstation_pillar
+fi
 ntp_pillar >> $setup_log 2>&1
 
 
@@ -674,12 +748,12 @@ echo "1" > /root/accept_changes
 # Import the gpg keys
 gpg_rpm_import >> $setup_log 2>&1
 info "Disabling fastestmirror"
-[[ $OS == 'centos' ]] && disable_fastestmirror
+[[ $is_centos ]] && disable_fastestmirror
 if [[ ! $is_airgap ]]; then
 securityonion_repo >> $setup_log 2>&1
 update_packages >> $setup_log 2>&1
 else
 airgap_repo >> $setup_log 2>&1
 fi
 
 if [[ $is_sensor || $is_helix || $is_import ]]; then
@@ -699,15 +773,20 @@ echo "1" > /root/accept_changes
 set_progress_str 5 'Installing Salt and dependencies'
 saltify 2>> $setup_log
 
-set_progress_str 6 'Installing Docker and dependencies'
-docker_install >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+set_progress_str 6 'Installing Docker and dependencies'
+docker_install >> $setup_log 2>&1
+fi
 
 set_progress_str 7 'Generating patch pillar'
 patch_pillar >> $setup_log 2>&1
 
 set_progress_str 8 'Initializing Salt minion'
 configure_minion "$minion_type" >> $setup_log 2>&1
-check_sos_appliance >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+check_sos_appliance >> $setup_log 2>&1
+fi
 
 update_sudoers_for_testing >> $setup_log 2>&1
 
@@ -786,8 +865,10 @@ echo "1" > /root/accept_changes
 generate_ca >> $setup_log 2>&1
 fi
 
-set_progress_str 24 'Generating SSL'
-generate_ssl >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+set_progress_str 24 'Generating SSL'
+generate_ssl >> $setup_log 2>&1
+fi
 
 if [[ $is_manager || $is_helix || $is_import ]]; then
 set_progress_str 25 'Configuring firewall'
@@ -814,18 +895,22 @@ echo "1" > /root/accept_changes
 echo "Finished so-elastic-auth..." >> $setup_log 2>&1
 fi
 
-set_progress_str 61 "$(print_salt_state_apply 'firewall')"
-salt-call state.apply -l info firewall >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+set_progress_str 61 "$(print_salt_state_apply 'firewall')"
+salt-call state.apply -l info firewall >> $setup_log 2>&1
+fi
 
-if [ $OS = 'centos' ]; then
+if [[ $is_centos ]]; then
 set_progress_str 61 'Installing Yum utilities'
 salt-call state.apply -l info yum.packages >> $setup_log 2>&1
 fi
 
-set_progress_str 62 "$(print_salt_state_apply 'common')"
-salt-call state.apply -l info common >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+set_progress_str 62 "$(print_salt_state_apply 'common')"
+salt-call state.apply -l info common >> $setup_log 2>&1
+fi
 
-if [[ ! $is_helix && ! $is_receiver && ! $is_idh ]]; then
+if [[ ! $is_helix && ! $is_receiver && ! $is_idh && ! $is_analyst ]]; then
 set_progress_str 62 "$(print_salt_state_apply 'nginx')"
 salt-call state.apply -l info nginx >> $setup_log 2>&1
 fi
@@ -968,10 +1053,12 @@ echo "1" > /root/accept_changes
 salt-call state.apply -l info filebeat >> $setup_log 2>&1
 fi
 
-set_progress_str 85 'Applying finishing touches'
-filter_unused_nics >> $setup_log 2>&1
-network_setup >> $setup_log 2>&1
-so-ssh-harden >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+set_progress_str 85 'Applying finishing touches'
+filter_unused_nics >> $setup_log 2>&1
+network_setup >> $setup_log 2>&1
+so-ssh-harden >> $setup_log 2>&1
+fi
 
 if [[ $is_manager || $is_import ]]; then
 set_progress_str 87 'Adding user to SOC'
@@ -1025,9 +1112,9 @@ else
 fi
 
 if [[ -n $ENDGAMEHOST ]]; then
 set_progress_str 99 'Configuring firewall for Endgame SMP'
 so-firewall --apply includehost endgame $ENDGAMEHOST >> $setup_log 2>&1
 fi
 
 } | whiptail_gauge_post_setup "Running post-installation steps..."
 
@@ -33,6 +33,23 @@ whiptail_airgap() {
 INTERWEBS=$(echo "${INTERWEBS^^}" | tr -d ' ')
 }
 
+whiptail_analyst_install() {
+
+[ -n "$TESTING" ] && return
+
+read -r -d '' message <<- EOM
+
+Welcome to the Security Onion Analyst Workstation install!
+
+Would you like to join this workstation to an existing grid?
+
+NOTE: Selecting "no" will enable X Windows and set it to load at boot.
+EOM
+whiptail --title "$whiptail_title" \
+--yesno "$message" 11 75 --defaultno
+
+}
+
 whiptail_avoid_default_hostname() {
 [ -n "$TESTING" ] && return
 
@@ -434,9 +451,9 @@ whiptail_end_settings() {
 
 if [[ $is_idh ]]; then
 __append_end_msg "IDH Services Enabled:"
 for service in ${idh_services[@]}; do
 __append_end_msg "- $service"
 done
 
 fi
 
@@ -725,6 +742,17 @@ whiptail_homenet_sensor() {
 export HNSENSOR
 }
 
+whiptail_idh_preferences() {
+
+[ -n "$TESTING" ] && return
+
+idh_preferences=$(whiptail --title "$whiptail_title" --radiolist \
+"\nBy default, the IDH services selected in the previous screen will be bound to all interfaces / IPs on this system.\n\nYou can choose below whether or not to prevent IDH services from being published on this system's management IP." 20 75 5 \
+"$MAINIP" "Disable IDH services on this management IP " OFF 3>&1 1>&2 2>&3 )
+
+local exitstatus=$?
+whiptail_check_exitstatus $exitstatus
+}
 
 whiptail_idh_services() {
 
@@ -1473,6 +1501,14 @@ whiptail_oinkcode() {
 #TODO: helper function to display error message or exit if batch mode
 # exit_if_batch <"Error string"> <Error code (int)>
 
+whiptail_error_message() {
+
+local error_message=$1 # message to be displayed
+
+whiptail --title "$whiptail_title" --msgbox "$error_message" 10 75
+
+}
+
 whiptail_passwords_dont_match() {
 
 whiptail --title "$whiptail_title" --msgbox "Passwords don't match. Please re-enter." 8 75
@@ -1545,40 +1581,37 @@ whiptail_patch_schedule_select_hours() {
 
 [ -n "$TESTING" ] && return
 
 # Select the hours to patch
 PATCHSCHEDULEHOURS=$(whiptail --title "$whiptail_title" --checklist \
 "At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \
 00:00 "" OFF \
 01:00 "" OFF \
 02:00 "" ON \
 03:00 "" OFF \
 04:00 "" OFF \
 05:00 "" OFF \
 06:00 "" OFF \
 07:00 "" OFF \
 08:00 "" OFF \
 09:00 "" OFF \
 10:00 "" OFF \
 11:00 "" OFF \
 12:00 "" OFF \
 13:00 "" OFF \
 14:00 "" OFF \
 15:00 "" OFF \
 16:00 "" OFF \
 17:00 "" OFF \
 18:00 "" OFF \
 19:00 "" OFF \
 20:00 "" OFF \
 21:00 "" OFF \
 22:00 "" OFF \
 23:00 "" OFF 3>&1 1>&2 2>&3)
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
-
 PATCHSCHEDULEHOURS=$(echo "$PATCHSCHEDULEHOURS" | tr -d '"')
-
 IFS=' ' read -ra PATCHSCHEDULEHOURS <<< "$PATCHSCHEDULEHOURS"
-
 
 }
 
@@ -1923,10 +1956,10 @@ whiptail_suricata_pins() {
 done
 
 if [[ $is_node && $is_sensor && ! $is_eval ]]; then
 local PROCS=$(expr $lb_procs / 2)
 if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
 else
 local PROCS=$lb_procs
 fi
 
 SURIPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Suricata to:" 20 75 12 "${filtered_core_str[@]}" 3>&1 1>&2 2>&3 )
@@ -2006,10 +2039,10 @@ whiptail_zeek_pins() {
 done
 
 if [[ $is_smooshed ]]; then
 local PROCS=$(expr $lb_procs / 2)
 if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
 else
 local PROCS=$lb_procs
 fi
 
 ZEEKPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
BIN  sigs/securityonion-2.3.110-20220404.iso.sig  Normal file (binary file not shown)
BIN  sigs/securityonion-2.3.110-20220405.iso.sig  Normal file (binary file not shown)
BIN  sigs/securityonion-2.3.110-20220407.iso.sig  Normal file (binary file not shown)
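Taken together, the script changes make analyst a third accepted setup type next to iso and network, and choosing ANALYST during setup now re-executes so-setup with that argument instead of the old so-analyst-install script. A minimal usage sketch, assuming a CentOS 7 host with the setup scripts present:

```
# Hedged sketch only: start an Analyst Workstation install via the new setup type.
sudo bash so-setup analyst
# "no" at the join-grid prompt enables the graphical target and starts X
# (systemctl set-default graphical.target; startx); "yes" treats the box as a
# minion and continues with manager hostname, SSH key copy, and NTP collection.
```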