merge with dev and resolve conflicts

This commit is contained in:
m0duspwnens
2020-09-09 16:23:36 -04:00
35 changed files with 514 additions and 197 deletions

View File

@@ -0,0 +1,12 @@
# Main yum configuration deployed to airgapped CentOS nodes.
[main]
# Per-arch / per-release cache location; $basearch and $releasever are
# expanded by yum at runtime.
cachedir=/var/cache/yum/$basearch/$releasever
# Do not keep downloaded packages after a successful install.
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
# Only install packages matching the exact architecture.
exactarch=1
obsoletes=1
# Require GPG signature verification on packages.
gpgcheck=1
plugins=1
# Keep at most 2 installonly packages (e.g. kernels).
installonly_limit=2
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release

60
salt/airgap/init.sls Normal file
View File

@@ -0,0 +1,60 @@
# Airgap state: point yum at the repo served by the Security Onion manager
# and remove every internet-facing repo definition so no package traffic
# attempts to leave the enclave.
{% set MANAGER = salt['grains.get']('master') %}
# Replace the system yum.conf with the airgap-specific configuration.
airgapyum:
file.managed:
- name: /etc/yum/yum.conf
- source: salt://airgap/files/yum.conf
# Define the manager-hosted repo; gpgcheck/sslverify disabled because the
# manager serves over its self-signed cert inside the enclave.
airgap_repo:
pkgrepo.managed:
- humanname: Airgap Repo
- baseurl: https://{{ MANAGER }}/repo
- gpgcheck: 0
- sslverify: 0
# Remove all stock CentOS / EPEL / SaltStack / Wazuh repo files so yum
# cannot reach out to the internet mirrors.
agbase:
file.absent:
- name: /etc/yum.repos.d/CentOS-Base.repo
agcr:
file.absent:
- name: /etc/yum.repos.d/CentOS-CR.repo
agdebug:
file.absent:
- name: /etc/yum.repos.d/CentOS-Debuginfo.repo
agfasttrack:
file.absent:
- name: /etc/yum.repos.d/CentOS-fasttrack.repo
agmedia:
file.absent:
- name: /etc/yum.repos.d/CentOS-Media.repo
agsources:
file.absent:
- name: /etc/yum.repos.d/CentOS-Sources.repo
agvault:
file.absent:
- name: /etc/yum.repos.d/CentOS-Vault.repo
agkernel:
file.absent:
- name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo
agepel:
file.absent:
- name: /etc/yum.repos.d/epel.repo
agtesting:
file.absent:
- name: /etc/yum.repos.d/epel-testing.repo
agssrepo:
file.absent:
- name: /etc/yum.repos.d/saltstack.repo
agwazrepo:
file.absent:
- name: /etc/yum.repos.d/wazuh.repo

View File

@@ -5,6 +5,9 @@
# to the list predefined by the role / minion id affix # to the list predefined by the role / minion id affix
{% macro append_containers(pillar_name, k, compare )%} {% macro append_containers(pillar_name, k, compare )%}
{% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %} {% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %}
{% if k == 'enabled' %}
{% set k = pillar_name %}
{% endif %}
{% from 'common/maps/'~k~'.map.jinja' import docker as d with context %} {% from 'common/maps/'~k~'.map.jinja' import docker as d with context %}
{% for li in d['containers'] %} {% for li in d['containers'] %}
{{ docker['containers'].append(li) }} {{ docker['containers'].append(li) }}
@@ -21,7 +24,7 @@
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
{{ append_containers('manager', 'grafana', 0) }} {{ append_containers('manager', 'grafana', 0) }}
{{ append_containers('global', 'fleet_manager', 0) }} {{ append_containers('global', 'fleet_manager', 0) }}
{{ append_containers('manager', 'wazuh', 0) }} {{ append_containers('global', 'wazuh', 0) }}
{{ append_containers('manager', 'thehive', 0) }} {{ append_containers('manager', 'thehive', 0) }}
{{ append_containers('manager', 'playbook', 0) }} {{ append_containers('manager', 'playbook', 0) }}
{{ append_containers('manager', 'freq', 0) }} {{ append_containers('manager', 'freq', 0) }}
@@ -29,7 +32,7 @@
{% endif %} {% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %} {% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
{{ append_containers('global', 'strelka', 0) }} {{ append_containers('strelka', 'enabled', 0) }}
{% endif %} {% endif %}
{% if role in ['heavynode', 'standalone'] %} {% if role in ['heavynode', 'standalone'] %}

View File

@@ -48,6 +48,7 @@ if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to Cortex." echo "Successfully added user to Cortex."
else else
echo "Unable to add user to Cortex; user might already exist." echo "Unable to add user to Cortex; user might already exist."
echo $resp
exit 2 exit 2
fi fi

View File

@@ -51,6 +51,7 @@ if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully updated user in Cortex." echo "Successfully updated user in Cortex."
else else
echo "Failed to update user in Cortex." echo "Failed to update user in Cortex."
echo $resp
exit 2 exit 2
fi fi

View File

@@ -54,5 +54,6 @@ if [[ $? -eq 0 ]]; then
echo "Successfully added user to Fleet." echo "Successfully added user to Fleet."
else else
echo "Unable to add user to Fleet; user might already exist." echo "Unable to add user to Fleet; user might already exist."
echo $resp
exit 2 exit 2
fi fi

View File

@@ -53,5 +53,6 @@ if [[ $? -eq 0 ]]; then
echo "Successfully updated user in Fleet." echo "Successfully updated user in Fleet."
else else
echo "Failed to update user in Fleet." echo "Failed to update user in Fleet."
echo $resp
exit 2 exit 2
fi fi

View File

@@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- from 'common/maps/so-status.map.jinja' import docker with context %} {%- from 'common/maps/so-status.map.jinja' import docker with context %}
{%- set container_list = docker['containers'] | sort %} {%- set container_list = docker['containers'] | sort | unique %}
if ! [ "$(id -u)" = 0 ]; then if ! [ "$(id -u)" = 0 ]; then
echo "This command must be run as root" echo "This command must be run as root"
@@ -71,9 +71,9 @@ compare_lists() {
# {% endraw %} # {% endraw %}
create_expected_container_list() { create_expected_container_list() {
{% for item in container_list%} {% for item in container_list -%}
expected_container_list+=("{{ item }}") expected_container_list+=("{{ item }}")
{% endfor %} {% endfor -%}
} }
populate_container_lists() { populate_container_lists() {

View File

@@ -47,5 +47,6 @@ if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to TheHive." echo "Successfully added user to TheHive."
else else
echo "Unable to add user to TheHive; user might already exist." echo "Unable to add user to TheHive; user might already exist."
echo $resp
exit 2 exit 2
fi fi

View File

@@ -11,12 +11,13 @@
. /usr/sbin/so-common . /usr/sbin/so-common
if [[ $# < 1 || $# > 2 ]]; then if [[ $# < 1 || $# > 2 ]]; then
echo "Usage: $0 <list|add|update|delete|validate|valemail|valpass> [email]" echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
echo "" echo ""
echo " list: Lists all user email addresses currently defined in the identity system" echo " list: Lists all user email addresses currently defined in the identity system"
echo " add: Adds a new user to the identity system; requires 'email' parameter" echo " add: Adds a new user to the identity system; requires 'email' parameter"
echo " update: Updates a user's password; requires 'email' parameter" echo " update: Updates a user's password; requires 'email' parameter"
echo " delete: Deletes an existing user; requires 'email' parameter" echo " enable: Enables a user; requires 'email' parameter"
echo " disable: Disables a user; requires 'email' parameter"
echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter" echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter" echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
echo " valpass: Validates that a password is acceptable for defining a new user" echo " valpass: Validates that a password is acceptable for defining a new user"
@@ -63,7 +64,7 @@ function findIdByEmail() {
email=$1 email=$1
response=$(curl -Ss ${kratosUrl}/identities) response=$(curl -Ss ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq ".[] | select(.addresses[0].value == \"$email\") | .id") identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId echo $identityId
} }
@@ -113,7 +114,7 @@ function listUsers() {
response=$(curl -Ss ${kratosUrl}/identities) response=$(curl -Ss ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos" [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
echo "${response}" | jq -r ".[] | .addresses[0].value" | sort echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
} }
function createUser() { function createUser() {
@@ -122,17 +123,8 @@ function createUser() {
now=$(date -u +%FT%TZ) now=$(date -u +%FT%TZ)
addUserJson=$(cat <<EOF addUserJson=$(cat <<EOF
{ {
"addresses": [
{
"expires_at": "2099-01-31T12:00:00Z",
"value": "${email}",
"verified": true,
"verified_at": "${now}",
"via": "so-add-user"
}
],
"traits": {"email":"${email}"}, "traits": {"email":"${email}"},
"traits_schema_id": "default" "schema_id": "default"
} }
EOF EOF
) )
@@ -152,6 +144,36 @@ EOF
updatePassword $identityId updatePassword $identityId
} }
# Enable or disable a SOC user by toggling the Kratos credential state.
#   $1 - email address of the user
#   $2 - new status: 'active' or 'locked'
# This Kratos version has no lock/unlock API, so the identity_credentials
# row is edited directly in the sqlite database, then the identity's
# traits.status is synced via the admin API.
function updateStatus() {
email=$1
status=$2
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
# Fetch the full identity JSON; used below to build the PUT payload.
response=$(curl -Ss "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
# NOTE(review): identityId comes quoted from jq, so the SQL below compares
# against a quoted value — confirm this matches the stored identity_id.
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
if [[ "$status" == "locked" ]]; then
# Rewrite the credential config so the 'hashed' password marker becomes
# 'locked', rendering the stored credential unusable for login.
# NOTE(review): sed replaces the first occurrence of 'hashed' anywhere in
# the config JSON — assumes it only appears in the hasher field; confirm.
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to lock credential record"
# Terminate any active sessions so the user is logged out immediately.
echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
else
# Reverse of the above: restore the 'hashed' marker to re-enable login.
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to unlock credential record"
fi
# Update traits.status and strip the read-only fields that the Kratos
# admin PUT endpoint rejects (verifiable_addresses, id, schema_url).
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked"
}
function updateUser() { function updateUser() {
email=$1 email=$1
@@ -179,9 +201,8 @@ case "${operation}" in
validateEmail "$email" validateEmail "$email"
createUser "$email" createUser "$email"
echo "Successfully added new user to SOC" echo "Successfully added new user to SOC"
check_container thehive && (echo $password | so-thehive-user-add "$email" || so-thehive-user-enable "$email" true) check_container thehive && echo $password | so-thehive-user-add "$email"
check_container cortex && (echo $password | so-cortex-user-add "$email" || so-cortex-user-enable "$email" true) check_container fleet && echo $password | so-fleet-user-add "$email"
check_container fleet && (echo $password | so-fleet-user-add "$email" || so-fleet-user-enable "$email" true)
;; ;;
"list") "list")
@@ -197,6 +218,26 @@ case "${operation}" in
echo "Successfully updated user" echo "Successfully updated user"
;; ;;
"enable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
updateStatus "$email" 'active'
echo "Successfully enabled user"
check_container thehive && so-thehive-user-enable "$email" true
check_container fleet && so-fleet-user-enable "$email" true
;;
"disable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
updateStatus "$email" 'locked'
echo "Successfully disabled user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
;;
"delete") "delete")
verifyEnvironment verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided" [[ "$email" == "" ]] && fail "Email address must be provided"
@@ -204,7 +245,6 @@ case "${operation}" in
deleteUser "$email" deleteUser "$email"
echo "Successfully deleted user" echo "Successfully deleted user"
check_container thehive && so-thehive-user-enable "$email" false check_container thehive && so-thehive-user-enable "$email" false
check_container cortex && so-cortex-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false check_container fleet && so-fleet-user-enable "$email" false
;; ;;

View File

@@ -1,2 +1,2 @@
#!/bin/bash #!/bin/bash
so-user delete $* so-user disable $*

View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Thin wrapper: enable a SOC user account via so-user.
# Usage: so-user-enable <email>
# "$@" (not $*) preserves each argument as a separate word even if it
# contains spaces, and forwards them unchanged to so-user.
so-user enable "$@"

View File

@@ -117,6 +117,7 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.yum }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }} - {{ portgroups.sensoroni }}
@@ -200,6 +201,7 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.yum }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }} - {{ portgroups.sensoroni }}
@@ -283,6 +285,7 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.yum }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }} - {{ portgroups.sensoroni }}

View File

@@ -94,3 +94,6 @@ firewall:
wazuh_authd: wazuh_authd:
tcp: tcp:
- 1515 - 1515
yum:
tcp:
- 443

View File

@@ -1 +1 @@
# Put your own custom Snort/Suricata rules in /opt/so/saltstack/local/salt/idstools/localrules/local.rules # Put your own custom Snort/Suricata rules in this file! /opt/so/saltstack/local/salt/idstools/localrules/local.rules

View File

@@ -2,6 +2,8 @@
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see: # For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/ # * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -182,6 +184,19 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;

View File

@@ -2,6 +2,8 @@
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see: # For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/ # * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -232,6 +234,19 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
{%- if FLEET_NODE %} {%- if FLEET_NODE %}
location /fleet/ { location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet; return 301 https://{{ FLEET_IP }}/fleet;

View File

@@ -2,6 +2,8 @@
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see: # For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/ # * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -181,6 +183,19 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;

View File

@@ -2,6 +2,7 @@
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see: # For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/ # * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -182,6 +183,20 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;

View File

@@ -8,6 +8,7 @@
{% set MANAGER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap') %}
# Drop the correct nginx config based on role # Drop the correct nginx config based on role
nginxconfdir: nginxconfdir:
@@ -77,6 +78,9 @@ so-nginx:
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
- /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
- /opt/so/conf/fleet/packages:/opt/socore/html/packages - /opt/so/conf/fleet/packages:/opt/socore/html/packages
{% if ISAIRGAP is sameas true %}
- /nsm/repo:/opt/socore/html/repo:ro
{% endif %}
# ATT&CK Navigator binds # ATT&CK Navigator binds
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro - /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
- /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro - /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro

View File

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
"""Salt engine that verifies the CA certificate is present in the Salt mine.

Runs under the Salt engines subsystem on the master. If the mine query for
/etc/pki/ca.crt fails, the cached mine data for this minion is removed and
the mine is repopulated so dependent states (ca, ssl) can render again.
"""
import logging
from time import sleep
from os import remove

log = logging.getLogger(__name__)


def start(interval=30):
    """Poll the Salt mine every ``interval`` seconds and self-heal the CA entry.

    interval -- seconds to sleep between checks (default 30).
    Never returns; the Salt engines loader injects the ``__grains__`` and
    ``__salt__`` dunders before calling this entry point.
    """
    log.info("checkmine engine started")
    minionid = __grains__['id']
    while True:
        try:
            # Raises KeyError when the mine has no CA entry for this minion
            # (and may raise runner errors if the mine call itself fails).
            ca_crt = __salt__['saltutil.runner'](
                'mine.get', tgt=minionid,
                fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt']
            log.info('Successfully queried Salt mine for the CA.')
        except Exception:
            # Was a bare "except:", which would also swallow SystemExit and
            # KeyboardInterrupt and prevent a clean engine shutdown.
            log.error('Could not pull CA from the Salt mine.')
            log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.', minionid)
            try:
                remove('/var/cache/salt/master/minions/%s/mine.p' % minionid)
                log.info('Removed /var/cache/salt/master/minions/%s/mine.p', minionid)
            except FileNotFoundError:
                log.error('/var/cache/salt/master/minions/%s/mine.p does not exist', minionid)
            # Push a fresh CA entry into the mine after clearing the cache.
            __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt')
            log.warning('Salt mine repopulated with /etc/pki/ca.crt')
        sleep(interval)

View File

@@ -0,0 +1,6 @@
# Salt minion drop-in: load custom engine modules from /etc/salt/engines.
engines_dirs:
- /etc/salt/engines
# Start the checkmine engine with a 30-second polling interval.
engines:
- checkmine:
interval: 30

View File

@@ -3,6 +3,9 @@
{% if 'salt.master' in top_states %} {% if 'salt.master' in top_states %}
include:
- salt.minion
salt_master_package: salt_master_package:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
@@ -15,4 +18,19 @@ salt_master_service:
- name: salt-master - name: salt-master
- enable: True - enable: True
checkmine_engine:
file.managed:
- name: /etc/salt/engines/checkmine.py
- source: salt://salt/engines/checkmine.py
- makedirs: True
- watch_in:
- service: salt_minion_service
engines_config:
file.managed:
- name: /etc/salt/minion.d/engines.conf
- source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service
{% endif %} {% endif %}

View File

@@ -6,52 +6,39 @@ selfservice:
password: password:
enabled: true enabled: true
flows:
settings: settings:
privileged_session_max_age: 1m ui_url: https://{{ WEBACCESS }}/?r=/settings
after:
profile:
hooks:
- hook: verify
verify: verification:
return_to: https://{{ WEBACCESS }}/ ui_url: https://{{ WEBACCESS }}/
logout:
redirect_to: https://{{ WEBACCESS }}/login/
login: login:
request_lifespan: 10m ui_url: https://{{ WEBACCESS }}/login/
error:
ui_url: https://{{ WEBACCESS }}/login/
registration: registration:
request_lifespan: 10m ui_url: https://{{ WEBACCESS }}/login/
after:
password: default_browser_return_url: https://{{ WEBACCESS }}/
hooks: whitelisted_return_urls:
- hook: session - http://127.0.0.1
- hook: verify
log: log:
level: debug level: debug
format: json format: json
secrets: secrets:
session: default:
- {{ KRATOSKEY }} - {{ KRATOSKEY }}
urls: serve:
login_ui: https://{{ WEBACCESS }}/login/ public:
registration_ui: https://{{ WEBACCESS }}/login/ base_url: https://{{ WEBACCESS }}/auth/
error_ui: https://{{ WEBACCESS }}/login/ admin:
settings_ui: https://{{ WEBACCESS }}/?r=/settings base_url: https://{{ WEBACCESS }}/kratos/
verify_ui: https://{{ WEBACCESS }}/
mfa_ui: https://{{ WEBACCESS }}/
self:
public: https://{{ WEBACCESS }}/auth/
admin: https://{{ WEBACCESS }}/kratos/
default_return_to: https://{{ WEBACCESS }}/
whitelisted_return_to_urls:
- http://127.0.0.1
hashers: hashers:
argon2: argon2:
@@ -62,7 +49,6 @@ hashers:
key_length: 32 key_length: 32
identity: identity:
traits:
default_schema_url: file:///kratos-conf/schema.json default_schema_url: file:///kratos-conf/schema.json
courier: courier:

View File

@@ -2,6 +2,9 @@
"$id": "securityonion.schema.json", "$id": "securityonion.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#", "$schema": "http://json-schema.org/draft-07/schema#",
"title": "Person", "title": "Person",
"type": "object",
"properties": {
"traits": {
"type": "object", "type": "object",
"properties": { "properties": {
"email": { "email": {
@@ -31,6 +34,10 @@
"role": { "role": {
"type": "string", "type": "string",
"title": "Role" "title": "Role"
},
"status": {
"type": "string",
"title": "Status"
} }
}, },
"required": [ "required": [
@@ -38,3 +45,5 @@
], ],
"additionalProperties": false "additionalProperties": false
} }
}
}

View File

@@ -141,6 +141,12 @@
{ "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"}, { "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
{ "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"}, { "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
{ "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"} { "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
],
"actions": [
{ "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}" },
{ "name": "", "description": "actionAlertHelp", "icon": "fa-bell", "link": "/soctopus/thehive/alert/{eventId}" },
{ "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}" },
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}" }
] ]
} }
} }

View File

@@ -65,6 +65,7 @@ playbook_ext_url = https://{{MANAGER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no playbook_verifycert = no
playbook_unit_test_index = playbook-testing playbook_unit_test_index = playbook-testing
playbook_rulesets = windows
[log] [log]
logfile = /var/log/SOCtopus/soctopus.log logfile = /var/log/SOCtopus/soctopus.log

View File

@@ -13,7 +13,7 @@
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} {% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %} {% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %}
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %} {% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
{% set ca_server = grains.id %} {% set ca_server = grains.id %}
{% else %} {% else %}
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %} {% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}

View File

@@ -2,8 +2,8 @@ suricata:
config: config:
vars: vars:
address-groups: address-groups:
HOME_NET: "[192.168.0.0/16]" HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
EXTERNAL_NET: "!$HOME_NET" EXTERNAL_NET: "any"
HTTP_SERVERS: "$HOME_NET" HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET" SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET" SQL_SERVERS: "$HOME_NET"

View File

@@ -1,25 +1,32 @@
{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', '') -%} {% set ZEEKVER = salt['pillar.get']('global:zeekversion', '') %}
{%- set WAZUH = salt['pillar.get']('global:wazuh', '0') -%} {% set WAZUH = salt['pillar.get']('global:wazuh', '0') %}
{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%} {% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%} {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%} {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%} {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} {% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%} {% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
{% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% set saltversion = salt.salt.minion.version %} {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %}
base: base:
'not G@saltversion:{{saltversion}}': 'not G@saltversion:{{saltversion}}':
- match: compound - match: compound
{% if ISAIRGAP is sameas true %}
- airgap
{% endif %}
- salt.minion - salt.minion
'G@os:CentOS and G@saltversion:{{saltversion}}': 'G@os:CentOS and G@saltversion:{{saltversion}}':
- match: compound - match: compound
{% if ISAIRGAP is sameas true %}
- airgap
{% else %}
- yum - yum
{% endif %}
- yum.packages - yum.packages
'* and G@saltversion:{{saltversion}}': '* and G@saltversion:{{saltversion}}':
@@ -31,6 +38,7 @@ base:
'*_helix and G@saltversion:{{saltversion}}': '*_helix and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common
@@ -72,6 +80,7 @@ base:
'*_eval and G@saltversion:{{saltversion}}': '*_eval and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common
@@ -129,6 +138,7 @@ base:
'*_manager and G@saltversion:{{saltversion}}': '*_manager and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common
@@ -175,6 +185,7 @@ base:
'*_standalone and G@saltversion:{{saltversion}}': '*_standalone and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common
@@ -195,6 +206,7 @@ base:
{%- if WAZUH != 0 %} {%- if WAZUH != 0 %}
- wazuh - wazuh
{%- endif %} {%- endif %}
- elasticsearch
- logstash - logstash
- redis - redis
- kibana - kibana
@@ -298,6 +310,7 @@ base:
'*_managersearch and G@saltversion:{{saltversion}}': '*_managersearch and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common
@@ -388,6 +401,7 @@ base:
'*_import and G@saltversion:{{saltversion}}': '*_import and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master
- ca - ca
- ssl - ssl
- common - common

View File

@@ -20,6 +20,9 @@ if [ "$(id -u)" -ne 0 ]; then
exit 1 exit 1
fi fi
# Install a GUI text editor
yum -y install gedit
# Install misc utils # Install misc utils
yum -y install wget curl unzip epel-release; yum -y install wget curl unzip epel-release;
@@ -28,15 +31,41 @@ yum -y groupinstall "X Window System";
yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts; yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts;
unlink /etc/systemd/system/default.target; unlink /etc/systemd/system/default.target;
ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target; ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target;
yum -y install file-roller
# NetworkMiner has a compatibility issue with Mono 6 right now
if ! grep -q "NetworkMiner has a compatibility issue with Mono 6 right now" /etc/yum/pluginconf.d/versionlock.list; then
cat << EOF >> /etc/yum/pluginconf.d/versionlock.list
# NetworkMiner has a compatibility issue with Mono 6 right now
0:mono-complete-4.2.1.102-0.xamarin.1.*
0:mono-core-4.2.1.102-0.xamarin.1.*
0:mono-data-4.2.1.102-0.xamarin.1.*
0:mono-data-oracle-4.2.1.102-0.xamarin.1.*
0:mono-data-sqlite-4.2.1.102-0.xamarin.1.*
0:mono-devel-4.2.1.102-0.xamarin.1.*
0:mono-extras-4.2.1.102-0.xamarin.1.*
0:mono-locale-extras-4.2.1.102-0.xamarin.1.*
0:mono-mvc-4.2.1.102-0.xamarin.1.*
0:mono-nunit-4.2.1.102-0.xamarin.1.*
0:mono-reactive-4.2.1.102-0.xamarin.1.*
0:mono-wcf-4.2.1.102-0.xamarin.1.*
0:mono-web-4.2.1.102-0.xamarin.1.*
0:mono-winforms-4.2.1.102-0.xamarin.1.*
0:mono-winfxcore-4.2.1.102-0.xamarin.1.*
EOF
fi
# Install Mono - prereq for NetworkMiner # Install Mono - prereq for NetworkMiner
rpmkeys --import "http://pool.sks-keyservers.net/pks/lookup?op=get&search=0x3fa7e0328081bff6a14da29aa6a19b38d3d831ef"; rpmkeys --import "http://pool.sks-keyservers.net/pks/lookup?op=get&search=0x3fa7e0328081bff6a14da29aa6a19b38d3d831ef";
curl https://download.mono-project.com/repo/centos7-stable.repo | tee /etc/yum.repos.d/mono-centos7-stable.repo; curl https://download.mono-project.com/repo/centos7-stable.repo | tee /etc/yum.repos.d/mono-centos7-stable.repo;
yum -y install mono-devel; yum -y install mono-core mono-basic mono-winforms expect
# Install NetworkMiner # Install NetworkMiner
yum -y install libcanberra-gtk2; yum -y install libcanberra-gtk2;
wget https://www.netresec.com/?download=NetworkMiner -O /tmp/nm.zip; wget https://www.netresec.com/?download=NetworkMiner_2-4 -O /tmp/nm.zip;
mkdir -p /opt/networkminer/ mkdir -p /opt/networkminer/
unzip /tmp/nm.zip -d /opt/networkminer/; unzip /tmp/nm.zip -d /opt/networkminer/;
rm /tmp/nm.zip; rm /tmp/nm.zip;

View File

@@ -480,72 +480,6 @@ check_requirements() {
fi fi
} }
copy_salt_master_config() {
# Copy the Salt master config template to the proper directory
if [ "$setup_type" = 'iso' ]; then
cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
else
cp ../files/master /etc/salt/master >> "$setup_log" 2>&1
fi
# Restart the service so it picks up the changes
systemctl restart salt-master >> "$setup_log" 2>&1
}
copy_minion_tmp_files() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
fi
;;
*)
{
echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
if [ -d $temp_install_dir/salt/patch/os/schedules/ ]; then
if [ "$(ls -A $temp_install_dir/salt/patch/os/schedules/)" ]; then
scp -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
fi
fi
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
}
copy_ssh_key() {
echo "Generating SSH key"
# Generate SSH key
mkdir -p /root/.ssh
ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
echo "Copying the SSH key to the manager"
#Copy the key over to the manager
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
}
create_local_directories() {
echo "Creating local pillar and salt directories"
PILLARSALTDIR=${SCRIPTDIR::-5}
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
fi
done
chown -R socore:socore "$local_salt_dir/$i"
done
}
configure_network_sensor() { configure_network_sensor() {
echo "Setting up sensor interface" >> "$setup_log" 2>&1 echo "Setting up sensor interface" >> "$setup_log" 2>&1
local nic_error=0 local nic_error=0
@@ -630,6 +564,77 @@ configure_network_sensor() {
fi fi
} }
copy_salt_master_config() {
# Copy the Salt master config template to the proper directory
if [ "$setup_type" = 'iso' ]; then
cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
else
cp ../files/master /etc/salt/master >> "$setup_log" 2>&1
fi
# Restart the service so it picks up the changes
systemctl restart salt-master >> "$setup_log" 2>&1
}
copy_minion_tmp_files() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
fi
;;
*)
{
echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
if [ -d $temp_install_dir/salt/patch/os/schedules/ ]; then
if [ "$(ls -A $temp_install_dir/salt/patch/os/schedules/)" ]; then
scp -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
fi
fi
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
}
copy_ssh_key() {
echo "Generating SSH key"
# Generate SSH key
mkdir -p /root/.ssh
ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
echo "Copying the SSH key to the manager"
#Copy the key over to the manager
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
}
create_local_directories() {
echo "Creating local pillar and salt directories"
PILLARSALTDIR=${SCRIPTDIR::-5}
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
fi
done
chown -R socore:socore "$local_salt_dir/$i"
done
}
create_repo() {
# Create the repo for airgap
createrepo /nsm/repo
}
detect_cloud() { detect_cloud() {
echo "Testing if setup is running on a cloud instance..." >> "$setup_log" 2>&1 echo "Testing if setup is running on a cloud instance..." >> "$setup_log" 2>&1
if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
@@ -1097,6 +1102,14 @@ manager_global() {
" url_base: $REDIRECTIT"\ " url_base: $REDIRECTIT"\
" managerip: $MAINIP" > "$global_pillar" " managerip: $MAINIP" > "$global_pillar"
if [[ $is_airgap ]]; then
printf '%s\n'\
" airgap: True"\ >> "$global_pillar"
else
printf '%s\n'\
" airgap: False"\ >> "$global_pillar"
fi
# Check if TheHive is enabled. If so, add creds and other details # Check if TheHive is enabled. If so, add creds and other details
if [[ "$THEHIVE" == "1" ]]; then if [[ "$THEHIVE" == "1" ]]; then
printf '%s\n'\ printf '%s\n'\
@@ -1860,9 +1873,11 @@ set_redirect() {
set_updates() { set_updates() {
if [ "$MANAGERUPDATES" = '1' ]; then if [ "$MANAGERUPDATES" = '1' ]; then
if [ "$OS" = 'centos' ]; then if [ "$OS" = 'centos' ]; then
if [[ ! $is_airgap ]]; then
if ! grep -q "$MSRV" /etc/yum.conf; then if ! grep -q "$MSRV" /etc/yum.conf; then
echo "proxy=http://$MSRV:3142" >> /etc/yum.conf echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
fi fi
fi
else else
# Set it up so the updates roll through the manager # Set it up so the updates roll through the manager
printf '%s\n'\ printf '%s\n'\

View File

@@ -193,16 +193,16 @@ if [[ "$setup_type" == 'iso' ]]; then
is_iso=true is_iso=true
fi fi
#Check if this is an airgap install # Check if this is an airgap install
#if [[ $is_manager ]]; then if [[ $is_manager ]]; then
# if [[ $is_iso ]]; then if [[ $is_iso ]]; then
# whiptail_airgap whiptail_airgap
# if [[ "$INTERWEBS" == 'AIRGAP' ]]; then if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
# is_airgap=true is_airgap=true
# fi fi
# fi fi
#fi fi
if [[ $is_manager && $is_sensor ]]; then if [[ $is_manager && $is_sensor ]]; then
check_requirements "standalone" check_requirements "standalone"
@@ -411,6 +411,8 @@ if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi
whiptail_make_changes whiptail_make_changes
# From here on changes will be made.
if [[ -n "$TURBO" ]]; then if [[ -n "$TURBO" ]]; then
use_turbo_proxy use_turbo_proxy
fi fi
@@ -460,6 +462,11 @@ fi
# Set initial percentage to 0 # Set initial percentage to 0
export percentage=0 export percentage=0
if [[ $is_manager && $is_airgap ]]; then
info "Creating airgap repo"
create_repo >> $setup_log 2>&1
fi
if [[ $is_minion ]]; then if [[ $is_minion ]]; then
set_progress_str 1 'Configuring firewall' set_progress_str 1 'Configuring firewall'
set_initial_firewall_policy >> $setup_log 2>&1 set_initial_firewall_policy >> $setup_log 2>&1

View File

@@ -97,8 +97,8 @@ whiptail_zeek_version() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 75 4 "ZEEK" "Install Zeek (aka Bro)" ON \ ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 "ZEEK" "Zeek (formerly known as Bro)" ON \
"SURICATA" "Use Suricata 5" OFF 3>&1 1>&2 2>&3) "SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3)
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus
@@ -412,6 +412,13 @@ whiptail_enable_components() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
GRAFANA=0
OSQUERY=0
WAZUH=0
THEHIVE=0
PLAYBOOK=0
STRELKA=0
COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \ COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
"Select Components to install" 20 75 8 \ "Select Components to install" 20 75 8 \
GRAFANA "Enable Grafana for system monitoring" ON \ GRAFANA "Enable Grafana for system monitoring" ON \
@@ -621,9 +628,8 @@ whiptail_nids() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
NIDS=$(whiptail --title "Security Onion Setup" --radiolist \ NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose which IDS to run. \n "Choose which IDS to run. \n\n(Snort 3.0 support will be added once it is out of beta.)" 25 75 4 \
Snort 3.0 support will be added once it is out of beta:" 25 75 4 \ "Suricata" "Suricata" ON \
"Suricata" "Suricata 4.X" ON \
"Snort" "Placeholder for Snort 3.0 " OFF 3>&1 1>&2 2>&3 ) "Snort" "Placeholder for Snort 3.0 " OFF 3>&1 1>&2 2>&3 )
local exitstatus=$? local exitstatus=$?