Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Commit: merge with dev and resolve conflicts
salt/airgap/files/yum.conf (new file)
@@ -0,0 +1,12 @@
+[main]
+cachedir=/var/cache/yum/$basearch/$releasever
+keepcache=0
+debuglevel=2
+logfile=/var/log/yum.log
+exactarch=1
+obsoletes=1
+gpgcheck=1
+plugins=1
+installonly_limit=2
+bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
+distroverpkg=centos-release
salt/airgap/init.sls (new file)
@@ -0,0 +1,60 @@
+{% set MANAGER = salt['grains.get']('master') %}
+
+airgapyum:
+  file.managed:
+    - name: /etc/yum/yum.conf
+    - source: salt://airgap/files/yum.conf
+
+airgap_repo:
+  pkgrepo.managed:
+    - humanname: Airgap Repo
+    - baseurl: https://{{ MANAGER }}/repo
+    - gpgcheck: 0
+    - sslverify: 0
+
+agbase:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-Base.repo
+
+agcr:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-CR.repo
+
+agdebug:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-Debuginfo.repo
+
+agfasttrack:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-fasttrack.repo
+
+agmedia:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-Media.repo
+
+agsources:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-Sources.repo
+
+agvault:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-Vault.repo
+
+agkernel:
+  file.absent:
+    - name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo
+
+agepel:
+  file.absent:
+    - name: /etc/yum.repos.d/epel.repo
+
+agtesting:
+  file.absent:
+    - name: /etc/yum.repos.d/epel-testing.repo
+
+agssrepo:
+  file.absent:
+    - name: /etc/yum.repos.d/saltstack.repo
+
+agwazrepo:
+  file.absent:
+    - name: /etc/yum.repos.d/wazuh.repo
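Note: this state swaps every stock CentOS/EPEL/SaltStack/Wazuh repo for the single manager-hosted repo. A hedged way to sanity-check it without changing anything is a Salt dry run (the `'*'` target glob below is illustrative):

    # Dry-run the airgap state, then confirm only the manager repo remains:
    salt '*' state.apply airgap test=True
    salt '*' cmd.run 'yum repolist'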
@@ -5,6 +5,9 @@
 # to the list predefined by the role / minion id affix
 {% macro append_containers(pillar_name, k, compare )%}
 {% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %}
+{% if k == 'enabled' %}
+{% set k = pillar_name %}
+{% endif %}
 {% from 'common/maps/'~k~'.map.jinja' import docker as d with context %}
 {% for li in d['containers'] %}
 {{ docker['containers'].append(li) }}
@@ -21,7 +24,7 @@
 {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
 {{ append_containers('manager', 'grafana', 0) }}
 {{ append_containers('global', 'fleet_manager', 0) }}
-{{ append_containers('manager', 'wazuh', 0) }}
+{{ append_containers('global', 'wazuh', 0) }}
 {{ append_containers('manager', 'thehive', 0) }}
 {{ append_containers('manager', 'playbook', 0) }}
 {{ append_containers('manager', 'freq', 0) }}
@@ -29,7 +32,7 @@
 {% endif %}

 {% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
-{{ append_containers('global', 'strelka', 0) }}
+{{ append_containers('strelka', 'enabled', 0) }}
 {% endif %}

 {% if role in ['heavynode', 'standalone'] %}
@@ -48,6 +48,7 @@ if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
   echo "Successfully added user to Cortex."
 else
   echo "Unable to add user to Cortex; user might already exist."
+  echo $resp
   exit 2
 fi
@@ -51,6 +51,7 @@ if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
   echo "Successfully updated user in Cortex."
 else
   echo "Failed to update user in Cortex."
+  echo $resp
   exit 2
 fi
@@ -54,5 +54,6 @@ if [[ $? -eq 0 ]]; then
   echo "Successfully added user to Fleet."
 else
   echo "Unable to add user to Fleet; user might already exist."
+  echo $resp
   exit 2
 fi
@@ -53,5 +53,6 @@ if [[ $? -eq 0 ]]; then
   echo "Successfully updated user in Fleet."
 else
   echo "Failed to update user in Fleet."
+  echo $resp
   exit 2
 fi
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 {%- from 'common/maps/so-status.map.jinja' import docker with context %}
-{%- set container_list = docker['containers'] | sort %}
+{%- set container_list = docker['containers'] | sort | unique %}

 if ! [ "$(id -u)" = 0 ]; then
   echo "This command must be run as root"
@@ -71,9 +71,9 @@ compare_lists() {
 # {% endraw %}

 create_expected_container_list() {
-  {% for item in container_list%}
+  {% for item in container_list -%}
   expected_container_list+=("{{ item }}")
-  {% endfor %}
+  {% endfor -%}
 }

 populate_container_lists() {
@@ -47,5 +47,6 @@ if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
   echo "Successfully added user to TheHive."
 else
   echo "Unable to add user to TheHive; user might already exist."
+  echo $resp
   exit 2
 fi
@@ -11,12 +11,13 @@
 . /usr/sbin/so-common

 if [[ $# < 1 || $# > 2 ]]; then
-  echo "Usage: $0 <list|add|update|delete|validate|valemail|valpass> [email]"
+  echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
   echo ""
   echo " list: Lists all user email addresses currently defined in the identity system"
   echo " add: Adds a new user to the identity system; requires 'email' parameter"
   echo " update: Updates a user's password; requires 'email' parameter"
-  echo " delete: Deletes an existing user; requires 'email' parameter"
+  echo " enable: Enables a user; requires 'email' parameter"
+  echo " disable: Disables a user; requires 'email' parameter"
   echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
   echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
   echo " valpass: Validates that a password is acceptable for defining a new user"
@@ -63,7 +64,7 @@ function findIdByEmail() {
   email=$1

   response=$(curl -Ss ${kratosUrl}/identities)
-  identityId=$(echo "${response}" | jq ".[] | select(.addresses[0].value == \"$email\") | .id")
+  identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
   echo $identityId
 }
@@ -113,7 +114,7 @@ function listUsers() {
   response=$(curl -Ss ${kratosUrl}/identities)
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"

-  echo "${response}" | jq -r ".[] | .addresses[0].value" | sort
+  echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
 }

 function createUser() {
@@ -122,17 +123,8 @@ function createUser() {
   now=$(date -u +%FT%TZ)
   addUserJson=$(cat <<EOF
 {
-  "addresses": [
-    {
-      "expires_at": "2099-01-31T12:00:00Z",
-      "value": "${email}",
-      "verified": true,
-      "verified_at": "${now}",
-      "via": "so-add-user"
-    }
-  ],
   "traits": {"email":"${email}"},
-  "traits_schema_id": "default"
+  "schema_id": "default"
 }
 EOF
 )
@@ -152,6 +144,36 @@ EOF
   updatePassword $identityId
 }

+function updateStatus() {
+  email=$1
+  status=$2
+
+  identityId=$(findIdByEmail "$email")
+  [[ ${identityId} == "" ]] && fail "User not found"
+
+  response=$(curl -Ss "${kratosUrl}/identities/$identityId")
+  [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
+
+  oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
+  if [[ "$status" == "locked" ]]; then
+    config=$(echo $oldConfig | sed -e 's/hashed/locked/')
+    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    [[ $? != 0 ]] && fail "Unable to lock credential record"
+
+    echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
+    [[ $? != 0 ]] && fail "Unable to invalidate sessions"
+  else
+    config=$(echo $oldConfig | sed -e 's/locked/hashed/')
+    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    [[ $? != 0 ]] && fail "Unable to unlock credential record"
+  fi
+
+  updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
+  response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
+  [[ $? != 0 ]] && fail "Unable to mark user as locked"
+
+}
+
 function updateUser() {
   email=$1
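Note: updateStatus flips the stored password credential between "hashed" and "locked" directly in Kratos' SQLite database and drops any active sessions, so a disabled user can neither log in nor keep an existing session. A hedged spot-check, reusing the same $databasePath and identity id the script uses:

    # After disabling, the credential config should read "locked", not "hashed":
    echo "select config from identity_credentials where identity_id=$id;" \
      | sqlite3 "$databasePath" | grep -c locked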
@@ -179,9 +201,8 @@ case "${operation}" in
     validateEmail "$email"
     createUser "$email"
     echo "Successfully added new user to SOC"
-    check_container thehive && (echo $password | so-thehive-user-add "$email" || so-thehive-user-enable "$email" true)
-    check_container cortex && (echo $password | so-cortex-user-add "$email" || so-cortex-user-enable "$email" true)
-    check_container fleet && (echo $password | so-fleet-user-add "$email" || so-fleet-user-enable "$email" true)
+    check_container thehive && echo $password | so-thehive-user-add "$email"
+    check_container fleet && echo $password | so-fleet-user-add "$email"
     ;;

 "list")
@@ -197,6 +218,26 @@ case "${operation}" in
     echo "Successfully updated user"
     ;;

+"enable")
+    verifyEnvironment
+    [[ "$email" == "" ]] && fail "Email address must be provided"
+
+    updateStatus "$email" 'active'
+    echo "Successfully enabled user"
+    check_container thehive && so-thehive-user-enable "$email" true
+    check_container fleet && so-fleet-user-enable "$email" true
+    ;;
+
+"disable")
+    verifyEnvironment
+    [[ "$email" == "" ]] && fail "Email address must be provided"
+
+    updateStatus "$email" 'locked'
+    echo "Successfully disabled user"
+    check_container thehive && so-thehive-user-enable "$email" false
+    check_container fleet && so-fleet-user-enable "$email" false
+    ;;
+
 "delete")
     verifyEnvironment
     [[ "$email" == "" ]] && fail "Email address must be provided"
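Taken together with the wrapper scripts below, the intended lifecycle is now disable-then-reenable rather than a destructive delete. Illustrative usage (email address hypothetical):

    so-user disable analyst@example.com   # locks the credential, kills sessions
    so-user enable analyst@example.com    # restores the "hashed" credential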
@@ -204,7 +245,6 @@ case "${operation}" in
     deleteUser "$email"
     echo "Successfully deleted user"
     check_container thehive && so-thehive-user-enable "$email" false
-    check_container cortex && so-cortex-user-enable "$email" false
     check_container fleet && so-fleet-user-enable "$email" false
     ;;
@@ -1,2 +1,2 @@
 #!/bin/bash
-so-user delete $*
+so-user disable $*
salt/common/tools/sbin/so-user-enable (new executable file)
@@ -0,0 +1,2 @@
+#!/bin/bash
+so-user enable $*
@@ -117,6 +117,7 @@ role:
       - {{ portgroups.influxdb }}
       - {{ portgroups.wazuh_api }}
       - {{ portgroups.fleet_api }}
+      - {{ portgroups.yum }}
     sensor:
       portgroups:
         - {{ portgroups.sensoroni }}
@@ -200,6 +201,7 @@ role:
       - {{ portgroups.influxdb }}
       - {{ portgroups.wazuh_api }}
       - {{ portgroups.fleet_api }}
+      - {{ portgroups.yum }}
     sensor:
       portgroups:
         - {{ portgroups.sensoroni }}
@@ -283,6 +285,7 @@ role:
       - {{ portgroups.influxdb }}
       - {{ portgroups.wazuh_api }}
       - {{ portgroups.fleet_api }}
+      - {{ portgroups.yum }}
     sensor:
       portgroups:
         - {{ portgroups.sensoroni }}
@@ -94,3 +94,6 @@ firewall:
   wazuh_authd:
     tcp:
       - 1515
+  yum:
+    tcp:
+      - 443
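With TCP/443 opened for the new yum portgroup, a minion should be able to reach the manager's repo over HTTPS. A minimal hedged reachability check (substitute the real manager hostname; -k mirrors the repo's sslverify=0 setting):

    curl -sk https://MANAGER/repo/ | head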
@@ -1 +1 @@
-# Put your own custom Snort/Suricata rules in /opt/so/saltstack/local/salt/idstools/localrules/local.rules
+# Put your own custom Snort/Suricata rules in this file! /opt/so/saltstack/local/salt/idstools/localrules/local.rules
@@ -2,6 +2,8 @@
 {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
 {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
 {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
+{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
+
 # For more information on configuration, see:
 #   * Official English Documentation: http://nginx.org/en/docs/
 #   * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -182,6 +184,19 @@ http {
         proxy_set_header X-Forwarded-Proto $scheme;
       }

+      {%- if ISAIRGAP is sameas true %}
+      location /repo/ {
+        allow all;
+        sendfile on;
+        sendfile_max_chunk 1m;
+        autoindex on;
+        autoindex_exact_size off;
+        autoindex_format html;
+        autoindex_localtime on;
+      }
+
+      {%- endif %}
+
       location /grafana/ {
         auth_request /auth/sessions/whoami;
         rewrite /grafana/(.*) /$1 break;
@@ -2,6 +2,8 @@
 {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
 {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
 {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
+{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
+
 # For more information on configuration, see:
 #   * Official English Documentation: http://nginx.org/en/docs/
 #   * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -232,6 +234,19 @@ http {
         proxy_set_header X-Forwarded-Proto $scheme;
       }

+      {%- if ISAIRGAP is sameas true %}
+      location /repo/ {
+        allow all;
+        sendfile on;
+        sendfile_max_chunk 1m;
+        autoindex on;
+        autoindex_exact_size off;
+        autoindex_format html;
+        autoindex_localtime on;
+      }
+
+      {%- endif %}
+
       {%- if FLEET_NODE %}
       location /fleet/ {
         return 301 https://{{ FLEET_IP }}/fleet;
@@ -2,6 +2,8 @@
 {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
 {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
 {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
+{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
+
 # For more information on configuration, see:
 #   * Official English Documentation: http://nginx.org/en/docs/
 #   * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -181,6 +183,19 @@ http {
         proxy_set_header X-Forwarded-Proto $scheme;
       }

+      {%- if ISAIRGAP is sameas true %}
+      location /repo/ {
+        allow all;
+        sendfile on;
+        sendfile_max_chunk 1m;
+        autoindex on;
+        autoindex_exact_size off;
+        autoindex_format html;
+        autoindex_localtime on;
+      }
+
+      {%- endif %}
+
       location /grafana/ {
         auth_request /auth/sessions/whoami;
         rewrite /grafana/(.*) /$1 break;
@@ -2,6 +2,7 @@
 {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
 {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
 {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
+{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
 # For more information on configuration, see:
 #   * Official English Documentation: http://nginx.org/en/docs/
 #   * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -182,6 +183,20 @@ http {
         proxy_set_header X-Forwarded-Proto $scheme;
       }

+      {%- if ISAIRGAP is sameas true %}
+      location /repo/ {
+        allow all;
+        sendfile on;
+        sendfile_max_chunk 1m;
+        autoindex on;
+        autoindex_exact_size off;
+        autoindex_format html;
+        autoindex_localtime on;
+      }
+
+      {%- endif %}
+
+
       location /grafana/ {
         auth_request /auth/sessions/whoami;
         rewrite /grafana/(.*) /$1 break;
@@ -8,6 +8,7 @@
 {% set MANAGER = salt['grains.get']('master') %}
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
+{% set ISAIRGAP = salt['pillar.get']('global:airgap') %}

 # Drop the correct nginx config based on role
 nginxconfdir:
@@ -77,6 +78,9 @@ so-nginx:
       - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
       - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
       - /opt/so/conf/fleet/packages:/opt/socore/html/packages
+      {% if ISAIRGAP is sameas true %}
+      - /nsm/repo:/opt/socore/html/repo:ro
+      {% endif %}
       # ATT&CK Navigator binds
       - /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
       - /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro
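The bind mount only exists when the airgap pillar is true, so the nginx container serves /nsm/repo read-only under /repo/. A quick hedged check on a running manager (container name as defined above):

    docker exec so-nginx ls /opt/socore/html/repo | head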
salt/salt/engines/checkmine.py (new file)
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+import logging
+from time import sleep
+from os import remove
+
+log = logging.getLogger(__name__)
+
+def start(interval=30):
+    log.info("checkmine engine started")
+    minionid = __grains__['id']
+    while True:
+        try:
+            ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt']
+            log.info('Successfully queried Salt mine for the CA.')
+        except:
+            log.error('Could not pull CA from the Salt mine.')
+            log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' % minionid)
+            try:
+                remove('/var/cache/salt/master/minions/%s/mine.p' % minionid)
+                log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid)
+            except FileNotFoundError:
+                log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid)
+
+            __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt')
+            log.warning('Salt mine repopulated with /etc/pki/ca.crt')
+
+        sleep(interval)
salt/salt/files/engines.conf (new file)
@@ -0,0 +1,6 @@
+engines_dirs:
+  - /etc/salt/engines
+
+engines:
+  - checkmine:
+      interval: 30
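Engines are loaded when the minion restarts, which is why the salt.master state below uses watch_in on salt_minion_service. To confirm the engine came up, the minion log can be grepped for the start message emitted by checkmine.py (the path below is the stock Salt default and may differ on a Security Onion box):

    grep 'checkmine engine started' /var/log/salt/minion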
@@ -3,6 +3,9 @@

 {% if 'salt.master' in top_states %}

+include:
+  - salt.minion
+
 salt_master_package:
   pkg.installed:
     - pkgs:
@@ -15,4 +18,19 @@ salt_master_service:
     - name: salt-master
     - enable: True

+checkmine_engine:
+  file.managed:
+    - name: /etc/salt/engines/checkmine.py
+    - source: salt://salt/engines/checkmine.py
+    - makedirs: True
+    - watch_in:
+      - service: salt_minion_service
+
+engines_config:
+  file.managed:
+    - name: /etc/salt/minion.d/engines.conf
+    - source: salt://salt/files/engines.conf
+    - watch_in:
+      - service: salt_minion_service
+
 {% endif %}
@@ -6,52 +6,39 @@ selfservice:
  password:
    enabled: true

 flows:
   settings:
     privileged_session_max_age: 1m
     after:
       profile:
         hooks:
           - hook: verify
     ui_url: https://{{ WEBACCESS }}/?r=/settings

 verify:
   return_to: https://{{ WEBACCESS }}/

 logout:
   redirect_to: https://{{ WEBACCESS }}/login/
 verification:
   ui_url: https://{{ WEBACCESS }}/

 login:
   request_lifespan: 10m
   ui_url: https://{{ WEBACCESS }}/login/

 error:
   ui_url: https://{{ WEBACCESS }}/login/

 registration:
   request_lifespan: 10m
   after:
     password:
       hooks:
         - hook: session
         - hook: verify
   ui_url: https://{{ WEBACCESS }}/login/

 default_browser_return_url: https://{{ WEBACCESS }}/
 whitelisted_return_urls:
   - http://127.0.0.1

 log:
   level: debug
   format: json

 secrets:
   session:
     default:
       - {{ KRATOSKEY }}

 urls:
   login_ui: https://{{ WEBACCESS }}/login/
   registration_ui: https://{{ WEBACCESS }}/login/
   error_ui: https://{{ WEBACCESS }}/login/
   settings_ui: https://{{ WEBACCESS }}/?r=/settings
   verify_ui: https://{{ WEBACCESS }}/
   mfa_ui: https://{{ WEBACCESS }}/

   self:
     public: https://{{ WEBACCESS }}/auth/
     admin: https://{{ WEBACCESS }}/kratos/
   default_return_to: https://{{ WEBACCESS }}/
   whitelisted_return_to_urls:
     - http://127.0.0.1
 serve:
   public:
     base_url: https://{{ WEBACCESS }}/auth/
   admin:
     base_url: https://{{ WEBACCESS }}/kratos/

 hashers:
   argon2:
@@ -62,7 +49,6 @@ hashers:
     key_length: 32

 identity:
-  traits:
-    default_schema_url: file:///kratos-conf/schema.json
+  default_schema_url: file:///kratos-conf/schema.json

 courier:
@@ -2,6 +2,9 @@
   "$id": "securityonion.schema.json",
   "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Person",
+  "type": "object",
+  "properties": {
   "traits": {
     "type": "object",
     "properties": {
       "email": {
@@ -31,6 +34,10 @@
       "role": {
         "type": "string",
         "title": "Role"
       },
+      "status": {
+        "type": "string",
+        "title": "Status"
+      }
     },
     "required": [
@@ -38,3 +45,5 @@
     ],
     "additionalProperties": false
   }
+  }
+}
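The new status trait is what so-user's enable and disable operations write. A hedged one-liner to list each identity's status through the Kratos admin API (kratosUrl as defined in so-user):

    curl -Ss "${kratosUrl}/identities" \
      | jq -r '.[] | "\(.verifiable_addresses[0].value) \(.traits.status)"'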
@@ -141,6 +141,12 @@
     { "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
     { "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
     { "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
   ],
+  "actions": [
+    { "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}" },
+    { "name": "", "description": "actionAlertHelp", "icon": "fa-bell", "link": "/soctopus/thehive/alert/{eventId}" },
+    { "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}" },
+    { "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}" }
+  ]
 }
}
@@ -65,6 +65,7 @@ playbook_ext_url = https://{{MANAGER}}/playbook
 playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
 playbook_verifycert = no
 playbook_unit_test_index = playbook-testing
+playbook_rulesets = windows

 [log]
 logfile = /var/log/SOCtopus/soctopus.log
@@ -13,7 +13,7 @@
 {% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}

 {% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %}
-{%   set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
+{%   set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
 {%   set ca_server = grains.id %}
 {% else %}
 {%   set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
@@ -2,8 +2,8 @@ suricata:
   config:
     vars:
       address-groups:
-        HOME_NET: "[192.168.0.0/16]"
-        EXTERNAL_NET: "!$HOME_NET"
+        HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
+        EXTERNAL_NET: "any"
         HTTP_SERVERS: "$HOME_NET"
         SMTP_SERVERS: "$HOME_NET"
         SQL_SERVERS: "$HOME_NET"
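Widening HOME_NET to all RFC 1918 space and relaxing EXTERNAL_NET to "any" makes the defaults safer on networks the installer knows nothing about. To see what a given deployment actually renders, the pillar can be queried directly on the minion:

    salt-call pillar.get suricata:config:vars:address-groups:HOME_NET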
salt/top.sls
@@ -1,25 +1,32 @@
-{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', '') -%}
-{%- set WAZUH = salt['pillar.get']('global:wazuh', '0') -%}
-{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%}
-{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%}
-{%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%}
-{%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%}
-{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
-{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
-{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%}
-{% import_yaml 'salt/minion.defaults.yaml' as salt %}
-{% set saltversion = salt.salt.minion.version %}
+{% set ZEEKVER = salt['pillar.get']('global:zeekversion', '') %}
+{% set WAZUH = salt['pillar.get']('global:wazuh', '0') %}
+{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
+{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
+{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
+{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
+{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
+{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
+{% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
+{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
+{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
+{% set saltversion = saltversion.salt.minion.version %}

 base:

   'not G@saltversion:{{saltversion}}':
     - match: compound
+    {% if ISAIRGAP is sameas true %}
+    - airgap
+    {% endif %}
     - salt.minion

   'G@os:CentOS and G@saltversion:{{saltversion}}':
     - match: compound
+    {% if ISAIRGAP is sameas true %}
+    - airgap
+    {% else %}
     - yum
+    {% endif %}
     - yum.packages

   '* and G@saltversion:{{saltversion}}':
@@ -31,6 +38,7 @@ base:

   '*_helix and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -72,6 +80,7 @@ base:

   '*_eval and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -129,6 +138,7 @@ base:

   '*_manager and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -175,6 +185,7 @@ base:

   '*_standalone and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -195,6 +206,7 @@ base:
     {%- if WAZUH != 0 %}
     - wazuh
     {%- endif %}
     - elasticsearch
+    - logstash
     - redis
     - kibana
@@ -298,6 +310,7 @@ base:

   '*_managersearch and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -388,6 +401,7 @@ base:

   '*_import and G@saltversion:{{saltversion}}':
     - match: compound
+    - salt.master
     - ca
     - ssl
     - common
@@ -20,6 +20,9 @@ if [ "$(id -u)" -ne 0 ]; then
   exit 1
 fi

+# Install a GUI text editor
+yum -y install gedit
+
 # Install misc utils
 yum -y install wget curl unzip epel-release;
@@ -28,15 +31,41 @@ yum -y groupinstall "X Window System";
 yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts;
 unlink /etc/systemd/system/default.target;
 ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target;
 yum -y install file-roller

+# NetworkMiner has a compatibility issue with Mono 6 right now
+if ! grep -q "NetworkMiner has a compatibility issue with Mono 6 right now" /etc/yum/pluginconf.d/versionlock.list; then
+
+cat << EOF >> /etc/yum/pluginconf.d/versionlock.list
+
+# NetworkMiner has a compatibility issue with Mono 6 right now
+0:mono-complete-4.2.1.102-0.xamarin.1.*
+0:mono-core-4.2.1.102-0.xamarin.1.*
+0:mono-data-4.2.1.102-0.xamarin.1.*
+0:mono-data-oracle-4.2.1.102-0.xamarin.1.*
+0:mono-data-sqlite-4.2.1.102-0.xamarin.1.*
+0:mono-devel-4.2.1.102-0.xamarin.1.*
+0:mono-extras-4.2.1.102-0.xamarin.1.*
+0:mono-locale-extras-4.2.1.102-0.xamarin.1.*
+0:mono-mvc-4.2.1.102-0.xamarin.1.*
+0:mono-nunit-4.2.1.102-0.xamarin.1.*
+0:mono-reactive-4.2.1.102-0.xamarin.1.*
+0:mono-wcf-4.2.1.102-0.xamarin.1.*
+0:mono-web-4.2.1.102-0.xamarin.1.*
+0:mono-winforms-4.2.1.102-0.xamarin.1.*
+0:mono-winfxcore-4.2.1.102-0.xamarin.1.*
+EOF
+
+fi
+
 # Install Mono - prereq for NetworkMiner
 rpmkeys --import "http://pool.sks-keyservers.net/pks/lookup?op=get&search=0x3fa7e0328081bff6a14da29aa6a19b38d3d831ef";
 curl https://download.mono-project.com/repo/centos7-stable.repo | tee /etc/yum.repos.d/mono-centos7-stable.repo;
 yum -y install mono-devel;
+yum -y install mono-core mono-basic mono-winforms expect

 # Install NetworkMiner
 yum -y install libcanberra-gtk2;
-wget https://www.netresec.com/?download=NetworkMiner -O /tmp/nm.zip;
+wget https://www.netresec.com/?download=NetworkMiner_2-4 -O /tmp/nm.zip;
 mkdir -p /opt/networkminer/
 unzip /tmp/nm.zip -d /opt/networkminer/;
 rm /tmp/nm.zip;
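Pinning every mono-* package to 4.2.1.102 relies on the yum versionlock plugin being present. Once the block above has run, the locks can be verified with (assuming yum-plugin-versionlock is installed on the analyst VM):

    yum versionlock list | grep mono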
@@ -480,72 +480,6 @@ check_requirements() {
   fi
 }

-copy_salt_master_config() {
-
-  # Copy the Salt master config template to the proper directory
-  if [ "$setup_type" = 'iso' ]; then
-    cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
-  else
-    cp ../files/master /etc/salt/master >> "$setup_log" 2>&1
-  fi
-
-  # Restart the service so it picks up the changes
-  systemctl restart salt-master >> "$setup_log" 2>&1
-}
-
-copy_minion_tmp_files() {
-  case "$install_type" in
-    'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
-      echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
-      cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
-      if [ -d "$temp_install_dir"/salt ] ; then
-        cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
-      fi
-      ;;
-    *)
-      {
-        echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
-        ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
-        ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
-        scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
-        if [ -d $temp_install_dir/salt/patch/os/schedules/ ]; then
-          if [ "$(ls -A $temp_install_dir/salt/patch/os/schedules/)" ]; then
-            scp -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
-          fi
-        fi
-        ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
-      } >> "$setup_log" 2>&1
-      ;;
-  esac
-}
-
-copy_ssh_key() {
-
-  echo "Generating SSH key"
-  # Generate SSH key
-  mkdir -p /root/.ssh
-  ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
-  chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
-  echo "Copying the SSH key to the manager"
-  #Copy the key over to the manager
-  ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
-}
-
-create_local_directories() {
-  echo "Creating local pillar and salt directories"
-  PILLARSALTDIR=${SCRIPTDIR::-5}
-  for i in "pillar" "salt"; do
-    for d in $(find $PILLARSALTDIR/$i -type d); do
-      suffixdir=${d//$PILLARSALTDIR/}
-      if [ ! -d "$local_salt_dir/$suffixdir" ]; then
-        mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
-      fi
-    done
-    chown -R socore:socore "$local_salt_dir/$i"
-  done
-
-}

 configure_network_sensor() {
   echo "Setting up sensor interface" >> "$setup_log" 2>&1
   local nic_error=0
@@ -630,6 +564,77 @@ configure_network_sensor() {
   fi
 }

+copy_salt_master_config() {
+
+  # Copy the Salt master config template to the proper directory
+  if [ "$setup_type" = 'iso' ]; then
+    cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
+  else
+    cp ../files/master /etc/salt/master >> "$setup_log" 2>&1
+  fi
+
+  # Restart the service so it picks up the changes
+  systemctl restart salt-master >> "$setup_log" 2>&1
+}
+
+copy_minion_tmp_files() {
+  case "$install_type" in
+    'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
+      echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
+      cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
+      if [ -d "$temp_install_dir"/salt ] ; then
+        cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
+      fi
+      ;;
+    *)
+      {
+        echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
+        ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
+        ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
+        scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
+        if [ -d $temp_install_dir/salt/patch/os/schedules/ ]; then
+          if [ "$(ls -A $temp_install_dir/salt/patch/os/schedules/)" ]; then
+            scp -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
+          fi
+        fi
+        ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
+      } >> "$setup_log" 2>&1
+      ;;
+  esac
+}
+
+copy_ssh_key() {
+
+  echo "Generating SSH key"
+  # Generate SSH key
+  mkdir -p /root/.ssh
+  ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
+  chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
+  echo "Copying the SSH key to the manager"
+  #Copy the key over to the manager
+  ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
+}
+
+create_local_directories() {
+  echo "Creating local pillar and salt directories"
+  PILLARSALTDIR=${SCRIPTDIR::-5}
+  for i in "pillar" "salt"; do
+    for d in $(find $PILLARSALTDIR/$i -type d); do
+      suffixdir=${d//$PILLARSALTDIR/}
+      if [ ! -d "$local_salt_dir/$suffixdir" ]; then
+        mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
+      fi
+    done
+    chown -R socore:socore "$local_salt_dir/$i"
+  done
+
+}
+
+create_repo() {
+  # Create the repo for airgap
+  createrepo /nsm/repo
+}
+
 detect_cloud() {
   echo "Testing if setup is running on a cloud instance..." >> "$setup_log" 2>&1
   if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
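create_repo assumes createrepo is available and that the ISO payload has already been copied to /nsm/repo; after it runs, the repo metadata should exist. A hedged post-run check:

    ls /nsm/repo/repodata/   # repomd.xml and associated metadata expected here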
@@ -1097,6 +1102,14 @@ manager_global() {
     " url_base: $REDIRECTIT"\
     " managerip: $MAINIP" > "$global_pillar"

+  if [[ $is_airgap ]]; then
+    printf '%s\n'\
+    " airgap: True" >> "$global_pillar"
+  else
+    printf '%s\n'\
+    " airgap: False" >> "$global_pillar"
+  fi
+
   # Check if TheHive is enabled. If so, add creds and other details
   if [[ "$THEHIVE" == "1" ]]; then
     printf '%s\n'\
@@ -1860,9 +1873,11 @@ set_redirect() {
 set_updates() {
   if [ "$MANAGERUPDATES" = '1' ]; then
     if [ "$OS" = 'centos' ]; then
+      if [[ ! $is_airgap ]]; then
         if ! grep -q "$MSRV" /etc/yum.conf; then
           echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
         fi
+      fi
     else
       # Set it up so the updates roll through the manager
       printf '%s\n'\
@@ -195,14 +195,14 @@ fi

 # Check if this is an airgap install

-#if [[ $is_manager ]]; then
-#  if [[ $is_iso ]]; then
-#    whiptail_airgap
-#    if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
-#      is_airgap=true
-#    fi
-#  fi
-#fi
+if [[ $is_manager ]]; then
+  if [[ $is_iso ]]; then
+    whiptail_airgap
+    if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
+      is_airgap=true
+    fi
+  fi
+fi

 if [[ $is_manager && $is_sensor ]]; then
   check_requirements "standalone"
@@ -411,6 +411,8 @@ if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi

 whiptail_make_changes

+# From here on changes will be made.
+
 if [[ -n "$TURBO" ]]; then
   use_turbo_proxy
 fi
@@ -460,6 +462,11 @@ fi
 # Set initial percentage to 0
 export percentage=0

+if [[ $is_manager && $is_airgap ]]; then
+  info "Creating airgap repo"
+  create_repo >> $setup_log 2>&1
+fi
+
 if [[ $is_minion ]]; then
   set_progress_str 1 'Configuring firewall'
   set_initial_firewall_policy >> $setup_log 2>&1
@@ -97,8 +97,8 @@ whiptail_zeek_version() {

   [ -n "$TESTING" ] && return

-  ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 75 4 "ZEEK" "Install Zeek (aka Bro)" ON \
-  "SURICATA" "Use Suricata 5" OFF 3>&1 1>&2 2>&3)
+  ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 "ZEEK" "Zeek (formerly known as Bro)" ON \
+  "SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3)

   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus
@@ -412,6 +412,13 @@ whiptail_enable_components() {

   [ -n "$TESTING" ] && return

+  GRAFANA=0
+  OSQUERY=0
+  WAZUH=0
+  THEHIVE=0
+  PLAYBOOK=0
+  STRELKA=0
+
   COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
   "Select Components to install" 20 75 8 \
   GRAFANA "Enable Grafana for system monitoring" ON \
@@ -621,9 +628,8 @@ whiptail_nids() {
   [ -n "$TESTING" ] && return

   NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
-  "Choose which IDS to run. \n
-  Snort 3.0 support will be added once it is out of beta:" 25 75 4 \
-  "Suricata" "Suricata 4.X" ON \
+  "Choose which IDS to run. \n\n(Snort 3.0 support will be added once it is out of beta.)" 25 75 4 \
+  "Suricata" "Suricata" ON \
   "Snort" "Placeholder for Snort 3.0 " OFF 3>&1 1>&2 2>&3 )

   local exitstatus=$?