merge and resolve conflict in elasticsearch state

m0duspwnens
2021-05-27 11:33:44 -04:00
20 changed files with 129 additions and 252 deletions

HOTFIX
@@ -1 +0,0 @@
-GRIDFIX


@@ -1,6 +1,6 @@
-## Security Onion 2.3.50
+## Security Onion 2.3.51

-Security Onion 2.3.50 is here!
+Security Onion 2.3.51 is here!

 ## Screenshots


@@ -1,17 +1,17 @@
-### 2.3.50 ISO image built on 2021/04/27
+### 2.3.51 ISO image built on 2021/04/27
 ### Download and Verify
-2.3.50 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
-MD5: C39CEA68B5A8AFC5CFFB2481797C0374
-SHA1: 00AD9F29ABE3AB495136989E62EBB8FA00DA82C6
-SHA256: D77AE370D7863837A989F6735413D1DD46B866D8D135A4C363B0633E3990387E
+2.3.51 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.51.iso
+MD5: 7CFB525BEFC0A9F2ED148F5831E387FA
+SHA1: 8CC34FCCC36822B309B8168AA706B3D1EC7F3BFD
+SHA256: 9892C2546C9AE5A48015160F379B070F0BE30C89693B97F3F1E1592DDCE1DEE0
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.51.iso.sig
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.51.iso.sig
 ```
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.51.iso
 ```
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.50.iso.sig securityonion-2.3.50.iso
+gpg --verify securityonion-2.3.51.iso.sig securityonion-2.3.51.iso
 ```
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Tue 27 Apr 2021 02:17:25 PM EDT using RSA key ID FE507013
+gpg: Signature made Thu 20 May 2021 07:49:57 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
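If `gpg --verify` instead reports that the public key is not found, the signing key has to be imported first. A minimal sketch using the KEYS URL from the hunk above:

```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
gpg --import KEYS
gpg --verify securityonion-2.3.51.iso.sig securityonion-2.3.51.iso
```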


@@ -1,208 +0,0 @@
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}

eval:
  containers:
    - so-nginx
    - so-telegraf
{% if GRAFANA == '1' %}
    - so-influxdb
    - so-grafana
{% endif %}
    - so-dockerregistry
    - so-soc
    - so-kratos
    - so-idstools
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
    - so-elasticsearch
    - so-logstash
    - so-kibana
    - so-steno
    - so-suricata
    - so-zeek
    - so-curator
    - so-elastalert
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
heavy_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-redis
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-steno
    - so-suricata
    - so-wazuh
    - so-filebeat
{% if ZEEKVER != 'SURICATA' %}
    - so-zeek
{% endif %}
helix:
  containers:
    - so-nginx
    - so-telegraf
    - so-idstools
    - so-steno
    - so-zeek
    - so-redis
    - so-logstash
    - so-filebeat
hot_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
    - so-elasticsearch
    - so-curator
manager_search:
  containers:
    - so-nginx
    - so-telegraf
    - so-soc
    - so-kratos
    - so-acng
    - so-idstools
    - so-redis
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-kibana
    - so-elastalert
    - so-filebeat
    - so-soctopus
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
manager:
  containers:
    - so-dockerregistry
    - so-nginx
    - so-telegraf
{% if GRAFANA == '1' %}
    - so-influxdb
    - so-grafana
{% endif %}
    - so-soc
    - so-kratos
    - so-acng
    - so-idstools
    - so-redis
    - so-elasticsearch
    - so-logstash
    - so-kibana
    - so-elastalert
    - so-filebeat
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
parser_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
search_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-filebeat
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
sensor:
  containers:
    - so-nginx
    - so-telegraf
    - so-steno
    - so-suricata
{% if ZEEKVER != 'SURICATA' %}
    - so-zeek
{% endif %}
    - so-wazuh
    - so-filebeat
warm_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-elasticsearch
fleet:
  containers:
{% if FLEETNODE %}
    - so-mysql
    - so-fleet
    - so-redis
    - so-filebeat
    - so-nginx
    - so-telegraf
{% endif %}


@@ -96,7 +96,6 @@ commonpkgs:
       - netcat
       - python3-mysqldb
       - sqlite3
-      - argon2
       - libssl-dev
       - python3-dateutil
       - python3-m2crypto
@@ -129,7 +128,6 @@ commonpkgs:
       - net-tools
       - curl
       - sqlite
-      - argon2
       - mariadb-devel
       - nmap-ncat
       - python3
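Dropping `argon2` from both package lists is safe because password hashing now goes through `htpasswd` (see the so-user changes below). As a sketch, the new dependency can be checked the same way so-user's `require` does; `htpasswd` is typically supplied by `httpd-tools` on CentOS and `apache2-utils` on Ubuntu:

```
command -v htpasswd >/dev/null || echo "htpasswd is missing"
```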


@@ -488,6 +488,7 @@ wait_for_web_response() {
   maxAttempts=${3:-300}
   curlcmd=${4:-curl}
   logfile=/root/wait_for_web_response.log
+  truncate -s 0 "$logfile"
   attempt=0
   while [[ $attempt -lt $maxAttempts ]]; do
     attempt=$((attempt+1))
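For context on the added line: GNU `truncate -s 0` empties the log in place (creating it if it does not yet exist), so each wait starts with a fresh log instead of appending to output from earlier calls. A quick illustration:

```
truncate -s 0 /root/wait_for_web_response.log
wc -c /root/wait_for_web_response.log   # prints 0: the file exists and is empty
```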


@@ -128,7 +128,7 @@ update_docker_containers() {
   mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
   # Let's make sure we have the public key
-  retry 50 10 "curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" >> "$LOG_FILE" 2>&1
+  retry 50 10 "curl -f -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" >> "$LOG_FILE" 2>&1
   result=$?
   if [[ $result -eq 0 ]]; then
     cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
@@ -151,7 +151,7 @@ update_docker_containers() {
   retry 50 10 "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" >> "$LOG_FILE" 2>&1
   # Get signature
-  retry 50 10 "curl -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" >> "$LOG_FILE" 2>&1
+  retry 50 10 "curl -f -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" >> "$LOG_FILE" 2>&1
   if [[ $? -ne 0 ]]; then
     echo "Unable to pull signature file for $image" >> "$LOG_FILE" 2>&1
     exit 1
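The new `-f`/`--fail` flag is what makes these `retry` wrappers meaningful: without it, curl exits 0 even when the server answers with an HTTP error, so a 404 page would be accepted as a successfully downloaded key or signature. A quick illustration against a hypothetical missing path:

```
curl -sSL https://sigs.securityonion.net/no-such-file -o /tmp/x; echo $?     # 0 despite HTTP 404
curl -f -sSL https://sigs.securityonion.net/no-such-file -o /tmp/x; echo $?  # 22 on HTTP errors >= 400
```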


@@ -39,10 +39,9 @@ email=$2
 kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
 databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}

-argon2Iterations=${ARGON2_ITERATIONS:-3}
-argon2Memory=${ARGON2_MEMORY:-14}
-argon2Parallelism=${ARGON2_PARALLELISM:-2}
-argon2HashSize=${ARGON2_HASH_SIZE:-32}
+bcryptRounds=${BCRYPT_ROUNDS:-12}
+elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
+elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}

 function fail() {
   msg=$1
@@ -58,7 +57,7 @@ function require() {
 # Verify this environment is capable of running this script
 function verifyEnvironment() {
-  require "argon2"
+  require "htpasswd"
   require "jq"
   require "curl"
   require "openssl"
@@ -95,6 +94,16 @@ function validateEmail() {
   fi
 }

+function hashPassword() {
+  password=$1
+  passwordHash=$(echo "${password}" | htpasswd -niBC $bcryptRounds SOUSER)
+  passwordHash=$(echo "$passwordHash" | cut -c 11-)
+  passwordHash="\$2a${passwordHash}" # still waiting for https://github.com/elastic/elasticsearch/issues/51132
+  echo "$passwordHash"
+}
+
 function updatePassword() {
   identityId=$1
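This switch from argon2 to bcrypt is the heart of the commit: Kratos and Elasticsearch can now share a single hash format, which is what lets so-user copy hashes straight out of the Kratos database into Elasticsearch's file realm. The two odd-looking lines in `hashPassword` decompose as follows: `htpasswd -niB` prints `SOUSER:$2y$...`, `cut -c 11-` drops the first ten characters (`SOUSER:$2y`), and re-prefixing with `$2a` produces the bcrypt variant tag Elasticsearch accepts, since its file realm does not recognize `$2y` (hence the linked elasticsearch issue). A sketch of the pipeline, assuming htpasswd is installed:

```
# SOUSER:$2y$12$<salt+checksum>  ->  $12$<salt+checksum>  ->  $2a$12$<salt+checksum>
echo "example-password" | htpasswd -niBC 12 SOUSER | cut -c 11- | sed 's/^/$2a/'
```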
@@ -111,15 +120,61 @@ function updatePassword() {
   if [[ -n $identityId ]]; then
     # Generate password hash
-    salt=$(openssl rand -hex 8)
-    passwordHash=$(echo "${password}" | argon2 ${salt} -id -t $argon2Iterations -m $argon2Memory -p $argon2Parallelism -l $argon2HashSize -e)
+    passwordHash=$(hashPassword "$password")

     # Update DB with new hash
-    echo "update identity_credentials set config=CAST('{\"hashed_password\":\"${passwordHash}\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
     [[ $? != 0 ]] && fail "Unable to update password"
   fi
 }

+function createElasticTmpFile() {
+  filename=$1
+  tmpFile=${filename}.tmp
+  truncate -s 0 "$tmpFile"
+  chmod 600 "$tmpFile"
+  chown elasticsearch:elasticsearch "$tmpFile"
+  echo "$tmpFile"
+}
+
+function syncElastic() {
+  usersFileTmp=$(createElasticTmpFile "${elasticUsersFile}")
+  rolesFileTmp=$(createElasticTmpFile "${elasticRolesFile}")
+
+  sysUser=$(lookup_pillar "auth:user" "elasticsearch")
+  sysPass=$(lookup_pillar "auth:pass" "elasticsearch")
+  [[ -z "$sysUser" || -z "$sysPass" ]] && fail "Elastic auth credentials for system user are missing"
+  sysHash=$(hashPassword "$sysPass")
+
+  # Generate the new users file
+  echo "${sysUser}:${sysHash}" >> "$usersFileTmp"
+  echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
+       "from identity_credential_identifiers ici, identity_credentials ic " \
+       "where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
+       "order by ici.identifier;" | \
+    sqlite3 "$databasePath" | \
+    jq -r '.user + ":" + .data.hashed_password' \
+    >> "$usersFileTmp"
+  [[ $? != 0 ]] && fail "Unable to read credential hashes from database"
+  mv -f "$usersFileTmp" "$elasticUsersFile"
+  [[ $? != 0 ]] && fail "Unable to create users file: $elasticUsersFile"
+
+  # Generate the new users_roles file
+  echo "superuser:${sysUser}" >> "$rolesFileTmp"
+  echo "select 'superuser:' || ici.identifier " \
+       "from identity_credential_identifiers ici, identity_credentials ic " \
+       "where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
+       "order by ici.identifier;" | \
+    sqlite3 "$databasePath" \
+    >> "$rolesFileTmp"
+  [[ $? != 0 ]] && fail "Unable to read credential IDs from database"
+  mv -f "$rolesFileTmp" "$elasticRolesFile"
+  [[ $? != 0 ]] && fail "Unable to create users file: $elasticRolesFile"
+}
+
+function syncAll() {
+  syncElastic
+}
+
 function listUsers() {
   response=$(curl -Ss -L ${kratosUrl}/identities)
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
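For reference, the two generated files follow Elasticsearch's file-realm layout: `users` carries one `name:bcrypt-hash` entry per line, and `users_roles` maps the role to each user, mirroring the `echo`/`select` pairs above. With a hypothetical system user and one analyst identity, the output would look like:

```
# users (hashes abbreviated)
so_elastic:$2a$12$...
analyst@example.com:$2a$12$...

# users_roles
superuser:so_elastic
superuser:analyst@example.com
```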
@@ -259,6 +314,11 @@ case "${operation}" in
     check_container fleet && so-fleet-user-enable "$email" false
     ;;

+  "sync")
+    syncAll
+    echo "Synchronization complete"
+    ;;
+
   "validate")
     validateEmail "$email"
     updatePassword
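Assuming the surrounding script is invoked as `so-user` (a hypothetical name here, inferred from the other `so-*` tools), the new operation gives operators a one-shot way to regenerate the Elastic credential files after identities change:

```
so-user sync
# Synchronization complete
```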


@@ -201,7 +201,6 @@ auth_users_roles:
     - cmd: syncesusers
 {% endif %}
-
 so-elasticsearch:
   docker_container.running:
     - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}
@@ -235,6 +234,8 @@ so-elasticsearch:
     - binds:
       - /opt/so/conf/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
       - /opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+      - /opt/so/conf/elasticsearch/users:/usr/share/elasticsearch/config/users:ro
+      - /opt/so/conf/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles:ro
       - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
       - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
       - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
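These binds are what connect the so-user sync output to Elasticsearch: the generated `users` and `users_roles` files land in the container's `config/` directory, which is exactly where the file realm looks for them. A hypothetical `elasticsearch.yml` excerpt (realm name invented; Security Onion's actual settings may differ):

```
# The file realm reads config/users and config/users_roles on each node.
xpack.security.authc.realms.file.file1.order: 0
```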


@@ -11,7 +11,7 @@
 {% set GRAFANA_SETTINGS = salt['grains.filter_by'](default_settings, default='grafana', merge=salt['pillar.get']('grafana', {})) %}

-{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}

 # Grafana all the things
 grafanadir:


@@ -3,7 +3,7 @@
 {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}

-{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}

 {% set MANAGER = salt['grains.get']('master') %}
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
@@ -17,7 +17,6 @@
 include:
   - salt.minion
   - salt.python3-influxdb
-# Influx DB
 influxconfdir:
   file.directory:
@@ -139,4 +138,4 @@ so_downsample_cq:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 {% endif %}


@@ -149,6 +149,12 @@ http {
         root /opt/socore/html;
         index index.html;
+
+        add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' 'unsafe-eval' https: data:; frame-ancestors 'self'";
+        add_header X-Frame-Options SAMEORIGIN;
+        add_header X-XSS-Protection "1; mode=block";
+        add_header X-Content-Type-Options nosniff;
+        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";

         ssl_certificate "/etc/pki/nginx/server.crt";
         ssl_certificate_key "/etc/pki/nginx/server.key";
         ssl_session_cache shared:SSL:1m;
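Once nginx reloads, the new security headers can be spot-checked from any client (hostname hypothetical):

```
curl -skI https://securityonion.example.com/ | grep -iE 'strict-transport|x-frame|x-content-type|x-xss|content-security'
```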


@@ -44,12 +44,24 @@ hold_salt_packages:
 {% endfor %}
 {% endif %}

+remove_info_log_level_logfile:
+  file.line:
+    - name: /etc/salt/minion
+    - match: "log_level_logfile: info"
+    - mode: delete
+
+remove_info_log_level:
+  file.line:
+    - name: /etc/salt/minion
+    - match: "log_level: info"
+    - mode: delete
+
 set_log_levels:
   file.append:
     - name: /etc/salt/minion
     - text:
-      - "log_level: info"
-      - "log_level_logfile: info"
+      - "log_level: error"
+      - "log_level_logfile: error"
     - listen_in:
       - service: salt_minion_service
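The two `file.line` states are what make this change clean: `file.append` only adds lines that are missing, so without the deletes the old `info` entries would remain alongside the new ones, leaving duplicate `log_level` keys in the minion config. After a highstate, /etc/salt/minion ends with exactly:

```
log_level: error
log_level_logfile: error
```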


@@ -41,12 +41,8 @@ serve:
   base_url: https://{{ WEBACCESS }}/kratos/

 hashers:
-  argon2:
-    parallelism: 2
-    memory: 16384
-    iterations: 3
-    salt_length: 16
-    key_length: 32
+  bcrypt:
+    cost: 12

 identity:
   default_schema_url: file:///kratos-conf/schema.json
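Bcrypt cost 12 here matches `bcryptRounds` in so-user, and this format change is what makes hash reuse possible: Elasticsearch's file realm understands bcrypt but has no argon2 support. Kratos, being written in Go, emits `$2a$`-prefixed bcrypt hashes, which is why syncElastic can copy `hashed_password` values verbatim while htpasswd's `$2y` output needs the re-tag shown earlier. The resulting hashers block is simply:

```
hashers:
  bcrypt:
    cost: 12
```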


@@ -54,7 +54,7 @@
       "verifyCert": false
     },
     "influxdb": {
-{%- if grains['role'] in ['so-import'] %}
+{%- if grains['role'] in ['so-import'] or (grains['role'] == 'so-eval' and GRAFANA == 0) %}
       "hostUrl": "",
 {%- else %}
       "hostUrl": "https://{{ MANAGERIP }}:8086",


@@ -78,6 +78,7 @@ zeekspoolownership:
   file.directory:
     - name: /nsm/zeek/spool
     - user: 937
+    - max_depth: 0
     - recurse:
       - user


@@ -34,7 +34,7 @@ ZEEKVERSION=ZEEK
 # HELIXAPIKEY=
 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
 HNSENSOR=inherit
-HOSTNAME=distributed-sensor
+HOSTNAME=Distributed-Sensor
 install_type=SENSOR
 # LSINPUTBATCHCOUNT=
 # LSINPUTTHREADS=


@@ -2079,7 +2079,7 @@ saltify() {
     'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
       reserve_group_ids >> "$setup_log" 2>&1
       if [[ ! $is_iso ]]; then
-        logCmd "yum -y install sqlite argon2 curl mariadb-devel"
+        logCmd "yum -y install sqlite curl mariadb-devel"
       fi
       # Download Ubuntu Keys in case manager updates = 1
       mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
@@ -2175,7 +2175,7 @@ saltify() {
     retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
     set_progress_str 6 'Installing various dependencies'
-    retry 50 10 "apt-get -y install sqlite3 argon2 libssl-dev" >> "$setup_log" 2>&1 || exit 1
+    retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
     set_progress_str 7 'Installing salt-master'
     retry 50 10 "apt-get -y install salt-master=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
     retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1


@@ -408,6 +408,7 @@ whiptail_enable_components() {
   PLAYBOOK=0
   STRELKA=0

+  if [[ $is_eval ]]; then
   COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
   "Select Components to install:" 20 75 8 \
   GRAFANA "Enable Grafana for system monitoring" ON \
@@ -416,6 +417,17 @@ whiptail_enable_components() {
   THEHIVE "Enable TheHive" ON \
   PLAYBOOK "Enable Playbook" ON \
   STRELKA "Enable Strelka" ON 3>&1 1>&2 2>&3)
+  else
+    COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
+    "Select Components to install:" 20 75 7 \
+    OSQUERY "Enable Fleet with osquery" ON \
+    WAZUH "Enable Wazuh" ON \
+    THEHIVE "Enable TheHive" ON \
+    PLAYBOOK "Enable Playbook" ON \
+    STRELKA "Enable Strelka" ON 3>&1 1>&2 2>&3)
+    export "GRAFANA=1"
+  fi

   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus

Binary file not shown.