mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 17:22:49 +01:00
Merge remote-tracking branch 'origin/2.4/dev' into bravo
This commit is contained in:
1
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
1
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
@@ -32,6 +32,7 @@ body:
|
||||
- 2.4.170
|
||||
- 2.4.180
|
||||
- 2.4.190
|
||||
- 2.4.200
|
||||
- Other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
### 2.4.180-20250916 ISO image released on 2025/09/17
|
||||
### 2.4.190-20251024 ISO image released on 2025/10/24
|
||||
|
||||
|
||||
### Download and Verify
|
||||
|
||||
2.4.180-20250916 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
|
||||
2.4.190-20251024 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
|
||||
|
||||
MD5: DE93880E38DE4BE45D05A41E1745CB1F
|
||||
SHA1: AEA6948911E50A4A38E8729E0E965C565402E3FC
|
||||
SHA256: C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A
|
||||
MD5: 25358481FB876226499C011FC0710358
|
||||
SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
|
||||
SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872
|
||||
|
||||
Signature for ISO image:
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
|
||||
|
||||
Signing key:
|
||||
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
|
||||
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
|
||||
|
||||
Download the signature file for the ISO:
|
||||
```
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
|
||||
```
|
||||
|
||||
Download the ISO image:
|
||||
```
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
|
||||
```
|
||||
|
||||
Verify the downloaded ISO image using the signature file:
|
||||
```
|
||||
gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
|
||||
gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
|
||||
```
|
||||
|
||||
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
|
||||
```
|
||||
gpg: Signature made Tue 16 Sep 2025 06:30:19 PM EDT using RSA key ID FE507013
|
||||
gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
|
||||
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
|
||||
gpg: WARNING: This key is not certified with a trusted signature!
|
||||
gpg: There is no indication that the signature belongs to the owner.
|
||||
|
||||
91
salt/_modules/hypervisor.py
Normal file
91
salt/_modules/hypervisor.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#!/opt/saltstack/salt/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
#   "You may not move, change, disable, or circumvent the license key functionality
#   in the software, and you may not remove or obscure any functionality in the
#   software that is protected by the license key."

"""
Salt execution module for hypervisor operations.

This module provides functions for managing hypervisor configurations,
including VM file management.
"""

import json
import logging
import os

log = logging.getLogger(__name__)

__virtualname__ = 'hypervisor'

# UID/GID of the socore user and group; the VMs file must stay owned by
# socore:socore so the SOC stack can manage it.
SOCORE_UID = 939
SOCORE_GID = 939


def __virtual__():
    """
    Only load this module if we're on a system that can manage hypervisors.
    """
    return __virtualname__


def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
    """
    Remove a VM entry from the hypervisorVMs file.

    Args:
        vms_file_path (str): Path to the hypervisorVMs file
        vm_hostname (str): Hostname of the VM to remove (without role suffix)
        vm_role (str): Role of the VM

    Returns:
        dict: Result dictionary with 'result' (bool) and 'comment' (str) keys

    CLI Example:
        salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
    """
    try:
        # Check if file exists
        if not os.path.exists(vms_file_path):
            msg = f"VMs file not found: {vms_file_path}"
            log.error(msg)
            return {'result': False, 'comment': msg}

        # Read current VMs; an empty file is treated as an empty list.
        with open(vms_file_path, 'r') as f:
            content = f.read().strip()
            vms = json.loads(content) if content else []

        # Guard against a file that parses as valid JSON but whose root is
        # not the expected list of VM objects (e.g. a dict or scalar) —
        # previously this surfaced as an AttributeError in the catch-all
        # handler with a misleading "Failed to remove VM" message.
        if not isinstance(vms, list):
            msg = f"Unexpected JSON structure in {vms_file_path}: expected a list of VM entries"
            log.error(msg)
            return {'result': False, 'comment': msg}

        # Find and remove the VM entry (matched on hostname AND role).
        original_count = len(vms)
        vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]

        if len(vms) < original_count:
            # VM was found and removed, write back to file
            with open(vms_file_path, 'w') as f:
                json.dump(vms, f, indent=2)

            # Set socore:socore ownership (939:939)
            os.chown(vms_file_path, SOCORE_UID, SOCORE_GID)

            msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
            log.info(msg)
            return {'result': True, 'comment': msg}
        else:
            msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
            log.warning(msg)
            return {'result': False, 'comment': msg}

    except json.JSONDecodeError as e:
        msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
    except Exception as e:
        # Catch-all so a Salt CLI invocation always receives a structured
        # result dict; log with traceback to aid debugging unexpected errors.
        msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
        log.exception(msg)
        return {'result': False, 'comment': msg}
|
||||
@@ -220,12 +220,22 @@ compare_es_versions() {
|
||||
}
|
||||
|
||||
copy_new_files() {
|
||||
# Define files to exclude from deletion (relative to their respective base directories)
|
||||
local EXCLUDE_FILES=(
|
||||
"salt/hypervisor/soc_hypervisor.yaml"
|
||||
)
|
||||
|
||||
# Build rsync exclude arguments
|
||||
local EXCLUDE_ARGS=()
|
||||
for file in "${EXCLUDE_FILES[@]}"; do
|
||||
EXCLUDE_ARGS+=(--exclude="$file")
|
||||
done
|
||||
|
||||
# Copy new files over to the salt dir
|
||||
cd $UPDATE_DIR
|
||||
rsync -a salt $DEFAULT_SALT_DIR/ --delete
|
||||
rsync -a pillar $DEFAULT_SALT_DIR/ --delete
|
||||
rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
|
||||
rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
|
||||
chown -R socore:socore $DEFAULT_SALT_DIR/
|
||||
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
|
||||
cd /tmp
|
||||
}
|
||||
|
||||
|
||||
@@ -62,8 +62,6 @@ container_list() {
|
||||
"so-soc"
|
||||
"so-steno"
|
||||
"so-strelka-backend"
|
||||
"so-strelka-filestream"
|
||||
"so-strelka-frontend"
|
||||
"so-strelka-manager"
|
||||
"so-suricata"
|
||||
"so-telegraf"
|
||||
|
||||
@@ -222,6 +222,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
|
||||
fi
|
||||
|
||||
RESULT=0
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/elasticsearch/*.log"
|
||||
"/opt/so/log/elasticsearch/*.json"
|
||||
]
|
||||
}
|
||||
},
|
||||
|
||||
@@ -2,26 +2,30 @@
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
|
||||
{%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
|
||||
{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
|
||||
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
|
||||
{% if grains.role == 'so-heavynode' %}
|
||||
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
|
||||
{% endif %}
|
||||
|
||||
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
|
||||
{% if not AGENT_STATUS %}
|
||||
|
||||
{% if grains.role not in ['so-heavynode'] %}
|
||||
run_installer:
|
||||
cmd.script:
|
||||
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
||||
- cwd: /opt/so
|
||||
- args: -token={{ GRIDNODETOKENGENERAL }}
|
||||
- retry: True
|
||||
{% else %}
|
||||
run_installer:
|
||||
cmd.script:
|
||||
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
||||
- cwd: /opt/so
|
||||
- args: -token={{ GRIDNODETOKENHEAVY }}
|
||||
- retry: True
|
||||
{% endif %}
|
||||
pull_agent_installer:
|
||||
file.managed:
|
||||
- name: /opt/so/so-elastic-agent_linux_amd64
|
||||
- source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
||||
- mode: 755
|
||||
- makedirs: True
|
||||
|
||||
run_installer:
|
||||
cmd.run:
|
||||
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
|
||||
- cwd: /opt/so
|
||||
- retry:
|
||||
attempts: 3
|
||||
interval: 20
|
||||
|
||||
cleanup_agent_installer:
|
||||
file.absent:
|
||||
- name: /opt/so/so-elastic-agent_linux_amd64
|
||||
{% endif %}
|
||||
|
||||
@@ -1991,6 +1991,70 @@ elasticsearch:
|
||||
set_priority:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-logs-elasticsearch_x_server:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- logs-elasticsearch.server@package
|
||||
- logs-elasticsearch.server@custom
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates:
|
||||
- logs-elasticsearch.server@custom
|
||||
index_patterns:
|
||||
- logs-elasticsearch.server-*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
_meta:
|
||||
managed: true
|
||||
managed_by: security_onion
|
||||
package:
|
||||
name: elastic_agent
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-logs-elasticsearch.server-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 5000
|
||||
number_of_replicas: 0
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
_meta:
|
||||
managed: true
|
||||
managed_by: security_onion
|
||||
package:
|
||||
name: elastic_agent
|
||||
phases:
|
||||
cold:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 0
|
||||
min_age: 60d
|
||||
delete:
|
||||
actions:
|
||||
delete: {}
|
||||
min_age: 365d
|
||||
hot:
|
||||
actions:
|
||||
rollover:
|
||||
max_age: 30d
|
||||
max_primary_shard_size: 50gb
|
||||
set_priority:
|
||||
priority: 100
|
||||
min_age: 0ms
|
||||
warm:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-logs-endpoint_x_actions:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
|
||||
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
|
||||
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
|
||||
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
|
||||
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||
|
||||
@@ -20,8 +20,28 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
|
||||
appender.rolling.strategy.action.type = Delete
|
||||
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
|
||||
appender.rolling.strategy.action.condition.type = IfFileName
|
||||
appender.rolling.strategy.action.condition.glob = *.gz
|
||||
appender.rolling.strategy.action.condition.glob = *.log.gz
|
||||
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
|
||||
appender.rolling.strategy.action.condition.nested_condition.age = 7D
|
||||
|
||||
appender.rolling_json.type = RollingFile
|
||||
appender.rolling_json.name = rolling_json
|
||||
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
|
||||
appender.rolling_json.layout.type = ECSJsonLayout
|
||||
appender.rolling_json.layout.dataset = elasticsearch.server
|
||||
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
|
||||
appender.rolling_json.policies.type = Policies
|
||||
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
|
||||
appender.rolling_json.policies.time.interval = 1
|
||||
appender.rolling_json.policies.time.modulate = true
|
||||
appender.rolling_json.strategy.type = DefaultRolloverStrategy
|
||||
appender.rolling_json.strategy.action.type = Delete
|
||||
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
|
||||
appender.rolling_json.strategy.action.condition.type = IfFileName
|
||||
appender.rolling_json.strategy.action.condition.glob = *.json.gz
|
||||
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
|
||||
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
|
||||
|
||||
rootLogger.level = info
|
||||
rootLogger.appenderRef.rolling.ref = rolling
|
||||
rootLogger.appenderRef.rolling_json.ref = rolling_json
|
||||
|
||||
@@ -392,6 +392,7 @@ elasticsearch:
|
||||
so-logs-elastic_agent_x_metricbeat: *indexSettings
|
||||
so-logs-elastic_agent_x_osquerybeat: *indexSettings
|
||||
so-logs-elastic_agent_x_packetbeat: *indexSettings
|
||||
so-logs-elasticsearch_x_server: *indexSettings
|
||||
so-metrics-endpoint_x_metadata: *indexSettings
|
||||
so-metrics-endpoint_x_metrics: *indexSettings
|
||||
so-metrics-endpoint_x_policy: *indexSettings
|
||||
|
||||
1159
salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate
Executable file
1159
salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate
Executable file
File diff suppressed because it is too large
Load Diff
@@ -58,10 +58,26 @@
|
||||
{% set role = vm.get('role', '') %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}
|
||||
|
||||
{# Load VM configuration from config file #}
|
||||
{# Try to load VM configuration from config file first, then .error file if config doesn't exist #}
|
||||
{% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
|
||||
{% set vm_error_file = vm_file ~ '.error' %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}
|
||||
|
||||
{# Check if base config file exists #}
|
||||
{% set config_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_file) %}
|
||||
{% set error_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_error_file) %}
|
||||
|
||||
{% set vm_state = none %}
|
||||
{% if config_exists %}
|
||||
{% import_json vm_file as vm_state %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from base file') %}
|
||||
{% elif error_exists %}
|
||||
{% import_json vm_error_file as vm_state %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from .error file') %}
|
||||
{% else %}
|
||||
{% do salt.log.warning('salt/hypervisor/map.jinja: No config or error file found for VM ' ~ hostname ~ '_' ~ role) %}
|
||||
{% endif %}
|
||||
|
||||
{% if vm_state %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
|
||||
{% set vm_data = {'config': vm_state.config} %}
|
||||
@@ -85,7 +101,7 @@
|
||||
{% endif %}
|
||||
{% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
|
||||
{% else %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Config file empty: ' ~ vm_file) %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Skipping VM ' ~ hostname ~ '_' ~ role ~ ' - no config available') %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ used during VM provisioning to add dedicated NSM storage volumes.
|
||||
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm.img`.
|
||||
- Volumes are attached as `/dev/vdb` using virtio-blk for high performance.
|
||||
- The script checks available disk space before creating the volume.
|
||||
- Ownership is set to `socore:socore` with permissions `644`.
|
||||
- Ownership is set to `qemu:qemu` with permissions `640`.
|
||||
- Without the `-S` flag, the VM remains stopped after volume attachment.
|
||||
|
||||
**Description:**
|
||||
@@ -98,7 +98,7 @@ The `so-kvm-create-volume` script creates and attaches NSM storage volumes using
|
||||
3. **Volume Creation:**
|
||||
- Creates volume directory if it doesn't exist
|
||||
- Uses `qemu-img create` with full pre-allocation
|
||||
- Sets proper ownership (socore:socore) and permissions (644)
|
||||
- Sets proper ownership (qemu:qemu) and permissions (640)
|
||||
- Validates volume creation success
|
||||
|
||||
4. **Volume Attachment:**
|
||||
@@ -279,20 +279,20 @@ def create_volume_file(vm_name, size_gb, logger):
|
||||
logger.error(f"VOLUME: qemu-img error: {e.stderr.strip()}")
|
||||
raise VolumeCreationError(f"Failed to create volume: {e}")
|
||||
|
||||
# Set ownership to socore:socore
|
||||
# Set ownership to qemu:qemu
|
||||
try:
|
||||
socore_uid = pwd.getpwnam('socore').pw_uid
|
||||
socore_gid = grp.getgrnam('socore').gr_gid
|
||||
os.chown(volume_path, socore_uid, socore_gid)
|
||||
logger.info(f"VOLUME: Set ownership to socore:socore")
|
||||
qemu_uid = pwd.getpwnam('qemu').pw_uid
|
||||
qemu_gid = grp.getgrnam('qemu').gr_gid
|
||||
os.chown(volume_path, qemu_uid, qemu_gid)
|
||||
logger.info(f"VOLUME: Set ownership to qemu:qemu")
|
||||
except (KeyError, OSError) as e:
|
||||
logger.error(f"VOLUME: Failed to set ownership: {e}")
|
||||
raise VolumeCreationError(f"Failed to set ownership: {e}")
|
||||
|
||||
# Set permissions to 644
|
||||
# Set permissions to 640
|
||||
try:
|
||||
os.chmod(volume_path, 0o644)
|
||||
logger.info(f"VOLUME: Set permissions to 644")
|
||||
os.chmod(volume_path, 0o640)
|
||||
logger.info(f"VOLUME: Set permissions to 640")
|
||||
except OSError as e:
|
||||
logger.error(f"VOLUME: Failed to set permissions: {e}")
|
||||
raise VolumeCreationError(f"Failed to set permissions: {e}")
|
||||
@@ -492,10 +492,10 @@ def main():
|
||||
|
||||
# Ensure volume directory exists before checking disk space
|
||||
try:
|
||||
os.makedirs(VOLUME_DIR, mode=0o755, exist_ok=True)
|
||||
socore_uid = pwd.getpwnam('socore').pw_uid
|
||||
socore_gid = grp.getgrnam('socore').gr_gid
|
||||
os.chown(VOLUME_DIR, socore_uid, socore_gid)
|
||||
os.makedirs(VOLUME_DIR, mode=0o754, exist_ok=True)
|
||||
qemu_uid = pwd.getpwnam('qemu').pw_uid
|
||||
qemu_gid = grp.getgrnam('qemu').gr_gid
|
||||
os.chown(VOLUME_DIR, qemu_uid, qemu_gid)
|
||||
logger.debug(f"VOLUME: Ensured volume directory exists: {VOLUME_DIR}")
|
||||
except Exception as e:
|
||||
logger.error(f"VOLUME: Failed to create volume directory: {e}")
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -31,6 +31,19 @@ libvirt_conf_dir:
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
libvirt_volumes:
|
||||
file.directory:
|
||||
- name: /nsm/libvirt/volumes
|
||||
- user: qemu
|
||||
- group: qemu
|
||||
- dir_mode: 755
|
||||
- file_mode: 640
|
||||
- recurse:
|
||||
- user
|
||||
- group
|
||||
- mode
|
||||
- makedirs: True
|
||||
|
||||
libvirt_config:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/libvirt/libvirtd.conf
|
||||
|
||||
3
salt/logstash/tools/sbin/so-logstash-flow-stats
Normal file
3
salt/logstash/tools/sbin/so-logstash-flow-stats
Normal file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash

# Fetch flow statistics from the local Logstash node API and pretty-print
# the JSON response with jq.
curl --silent --location http://localhost:9600/_node/stats/flow | jq
|
||||
3
salt/logstash/tools/sbin/so-logstash-health
Normal file
3
salt/logstash/tools/sbin/so-logstash-health
Normal file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash

# Fetch the health report from the local Logstash node API and pretty-print
# the JSON response with jq.
curl --silent --location http://localhost:9600/_health_report | jq
|
||||
3
salt/logstash/tools/sbin/so-logstash-jvm-stats
Normal file
3
salt/logstash/tools/sbin/so-logstash-jvm-stats
Normal file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash

# Fetch JVM statistics from the local Logstash node API and pretty-print
# the JSON response with jq.
curl --silent --location http://localhost:9600/_node/stats/jvm | jq
|
||||
@@ -5,10 +5,12 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
default_salt_dir=/opt/so/saltstack/default
|
||||
clone_to_tmp() {
|
||||
VERBOSE=0
|
||||
VERY_VERBOSE=0
|
||||
TEST_MODE=0
|
||||
|
||||
clone_to_tmp() {
|
||||
# TODO Need to add a air gap option
|
||||
# Make a temp location for the files
|
||||
mkdir /tmp/sogh
|
||||
@@ -16,19 +18,110 @@ clone_to_tmp() {
|
||||
#git clone -b dev https://github.com/Security-Onion-Solutions/securityonion.git
|
||||
git clone https://github.com/Security-Onion-Solutions/securityonion.git
|
||||
cd /tmp
|
||||
}
|
||||
|
||||
# Report files that were added, modified, or deleted between a freshly
# cloned source tree and the installed destination tree.
# No-op unless the global VERBOSE=1; when the global VERY_VERBOSE=1 it
# additionally prints new-file previews and unified diffs.
# $1: source directory (fresh clone), $2: destination directory (installed),
# $3: label used in the section header ("salt" or "pillar").
show_file_changes() {
    local source_dir="$1"
    local dest_dir="$2"
    local dir_type="$3" # "salt" or "pillar"

    if [ $VERBOSE -eq 0 ]; then
        return
    fi

    echo "=== Changes for $dir_type directory ==="

    # Find all files in source directory
    if [ -d "$source_dir" ]; then
        # NOTE(review): the pipeline runs this while-loop in a subshell, so
        # variables set inside do not persist — fine here since we only echo.
        find "$source_dir" -type f | while read -r source_file; do
            # Get relative path
            rel_path="${source_file#$source_dir/}"
            dest_file="$dest_dir/$rel_path"

            if [ ! -f "$dest_file" ]; then
                echo "ADDED: $dest_file"
                if [ $VERY_VERBOSE -eq 1 ]; then
                    echo " (New file - showing first 20 lines)"
                    head -n 20 "$source_file" | sed 's/^/ + /'
                    echo ""
                fi
            elif ! cmp -s "$source_file" "$dest_file"; then
                echo "MODIFIED: $dest_file"
                if [ $VERY_VERBOSE -eq 1 ]; then
                    echo " (Changes:)"
                    diff -u "$dest_file" "$source_file" | sed 's/^/ /'
                    echo ""
                fi
            fi
        done
    fi

    # Find deleted files (exist in dest but not in source)
    if [ -d "$dest_dir" ]; then
        find "$dest_dir" -type f | while read -r dest_file; do
            # Get relative path
            rel_path="${dest_file#$dest_dir/}"
            source_file="$source_dir/$rel_path"

            if [ ! -f "$source_file" ]; then
                echo "DELETED: $dest_file"
                if [ $VERY_VERBOSE -eq 1 ]; then
                    echo " (File was deleted)"
                    echo ""
                fi
            fi
        done
    fi

    echo ""
}
|
||||
|
||||
copy_new_files() {
|
||||
|
||||
# Copy new files over to the salt dir
|
||||
cd /tmp/sogh/securityonion
|
||||
git checkout $BRANCH
|
||||
VERSION=$(cat VERSION)
|
||||
|
||||
if [ $TEST_MODE -eq 1 ]; then
|
||||
echo "=== TEST MODE: Showing what would change without making changes ==="
|
||||
echo "Branch: $BRANCH"
|
||||
echo "Version: $VERSION"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Show changes before copying if verbose mode is enabled OR if in test mode
|
||||
if [ $VERBOSE -eq 1 ] || [ $TEST_MODE -eq 1 ]; then
|
||||
if [ $TEST_MODE -eq 1 ]; then
|
||||
# In test mode, force at least basic verbose output
|
||||
local old_verbose=$VERBOSE
|
||||
if [ $VERBOSE -eq 0 ]; then
|
||||
VERBOSE=1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Analyzing file changes..."
|
||||
show_file_changes "$(pwd)/salt" "$default_salt_dir/salt" "salt"
|
||||
show_file_changes "$(pwd)/pillar" "$default_salt_dir/pillar" "pillar"
|
||||
|
||||
if [ $TEST_MODE -eq 1 ] && [ $old_verbose -eq 0 ]; then
|
||||
# Restore original verbose setting
|
||||
VERBOSE=$old_verbose
|
||||
fi
|
||||
fi
|
||||
|
||||
# If in test mode, don't copy files
|
||||
if [ $TEST_MODE -eq 1 ]; then
|
||||
echo "=== TEST MODE: No files were modified ==="
|
||||
echo "To apply these changes, run without --test option"
|
||||
rm -rf /tmp/sogh
|
||||
return
|
||||
fi
|
||||
|
||||
# We need to overwrite if there is a repo file
|
||||
if [ -d /opt/so/repo ]; then
|
||||
tar -czf /opt/so/repo/"$VERSION".tar.gz -C "$(pwd)/.." .
|
||||
fi
|
||||
|
||||
rsync -a salt $default_salt_dir/
|
||||
rsync -a pillar $default_salt_dir/
|
||||
chown -R socore:socore $default_salt_dir/salt
|
||||
@@ -45,11 +138,64 @@ got_root(){
|
||||
fi
|
||||
}
|
||||
|
||||
got_root
|
||||
if [ $# -ne 1 ] ; then
|
||||
show_usage() {
|
||||
echo "Usage: $0 [-v] [-vv] [--test] [branch]"
|
||||
echo " -v Show verbose output (files changed/added/deleted)"
|
||||
echo " -vv Show very verbose output (includes file diffs)"
|
||||
echo " --test Test mode - show what would change without making changes"
|
||||
echo " branch Git branch to checkout (default: 2.4/main)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 # Normal operation"
|
||||
echo " $0 -v # Show which files change"
|
||||
echo " $0 -vv # Show files and their diffs"
|
||||
echo " $0 --test # See what would change (dry run)"
|
||||
echo " $0 --test -vv # Test mode with detailed diffs"
|
||||
echo " $0 -v dev-branch # Use specific branch with verbose output"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-v)
|
||||
VERBOSE=1
|
||||
shift
|
||||
;;
|
||||
-vv)
|
||||
VERBOSE=1
|
||||
VERY_VERBOSE=1
|
||||
shift
|
||||
;;
|
||||
--test)
|
||||
TEST_MODE=1
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
show_usage
|
||||
;;
|
||||
-*)
|
||||
echo "Unknown option $1"
|
||||
show_usage
|
||||
;;
|
||||
*)
|
||||
# This should be the branch name
|
||||
if [ -z "$BRANCH" ]; then
|
||||
BRANCH="$1"
|
||||
else
|
||||
echo "Too many arguments"
|
||||
show_usage
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Set default branch if not provided
|
||||
if [ -z "$BRANCH" ]; then
|
||||
BRANCH=2.4/main
|
||||
else
|
||||
BRANCH=$1
|
||||
fi
|
||||
|
||||
got_root
|
||||
clone_to_tmp
|
||||
copy_new_files
|
||||
|
||||
@@ -21,6 +21,9 @@ whiptail_title='Security Onion UPdater'
|
||||
NOTIFYCUSTOMELASTICCONFIG=false
|
||||
TOPFILE=/opt/so/saltstack/default/salt/top.sls
|
||||
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
|
||||
SALTUPGRADED=false
|
||||
SALT_CLOUD_INSTALLED=false
|
||||
SALT_CLOUD_CONFIGURED=false
|
||||
# used to display messages to the user at the end of soup
|
||||
declare -a FINAL_MESSAGE_QUEUE=()
|
||||
|
||||
@@ -627,6 +630,8 @@ post_to_2.4.190() {
|
||||
update_default_logstash_output
|
||||
fi
|
||||
fi
|
||||
# Apply new elasticsearch.server index template
|
||||
rollover_index "logs-elasticsearch.server-default"
|
||||
|
||||
POSTVERSION=2.4.190
|
||||
}
|
||||
@@ -1258,24 +1263,43 @@ upgrade_check_salt() {
|
||||
}
|
||||
|
||||
upgrade_salt() {
|
||||
SALTUPGRADED=True
|
||||
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
|
||||
echo ""
|
||||
# If rhel family
|
||||
if [[ $is_rpm ]]; then
|
||||
# Check if salt-cloud is installed
|
||||
if rpm -q salt-cloud &>/dev/null; then
|
||||
SALT_CLOUD_INSTALLED=true
|
||||
fi
|
||||
# Check if salt-cloud is configured
|
||||
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
|
||||
SALT_CLOUD_CONFIGURED=true
|
||||
fi
|
||||
|
||||
echo "Removing yum versionlock for Salt."
|
||||
echo ""
|
||||
yum versionlock delete "salt"
|
||||
yum versionlock delete "salt-minion"
|
||||
yum versionlock delete "salt-master"
|
||||
# Remove salt-cloud versionlock if installed
|
||||
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
|
||||
yum versionlock delete "salt-cloud"
|
||||
fi
|
||||
echo "Updating Salt packages."
|
||||
echo ""
|
||||
set +e
|
||||
# if oracle run with -r to ignore repos set by bootstrap
|
||||
if [[ $OS == 'oracle' ]]; then
|
||||
# Add -L flag only if salt-cloud is already installed
|
||||
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
|
||||
run_check_net_err \
|
||||
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
|
||||
"Could not update salt, please check $SOUP_LOG for details."
|
||||
else
|
||||
run_check_net_err \
|
||||
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
|
||||
"Could not update salt, please check $SOUP_LOG for details."
|
||||
fi
|
||||
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
|
||||
else
|
||||
run_check_net_err \
|
||||
@@ -1288,6 +1312,10 @@ upgrade_salt() {
|
||||
yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
|
||||
yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
|
||||
yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
|
||||
# Add salt-cloud versionlock if installed
|
||||
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
|
||||
yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
|
||||
fi
|
||||
# Else do Ubuntu things
|
||||
elif [[ $is_deb ]]; then
|
||||
echo "Removing apt hold for Salt."
|
||||
@@ -1320,6 +1348,7 @@ upgrade_salt() {
|
||||
echo ""
|
||||
exit 1
|
||||
else
|
||||
SALTUPGRADED=true
|
||||
echo "Salt upgrade success."
|
||||
echo ""
|
||||
fi
|
||||
@@ -1563,6 +1592,11 @@ main() {
|
||||
# ensure the mine is updated and populated before highstates run, following the salt-master restart
|
||||
update_salt_mine
|
||||
|
||||
if [[ $SALT_CLOUD_CONFIGURED == true && $SALTUPGRADED == true ]]; then
|
||||
echo "Updating salt-cloud config to use the new Salt version"
|
||||
salt-call state.apply salt.cloud.config concurrent=True
|
||||
fi
|
||||
|
||||
enable_highstate
|
||||
|
||||
echo ""
|
||||
|
||||
@@ -211,7 +211,7 @@ Exit Codes:
|
||||
|
||||
Logging:
|
||||
|
||||
- Logs are written to /opt/so/log/salt/so-salt-cloud.log.
|
||||
- Logs are written to /opt/so/log/salt/so-salt-cloud.
|
||||
- Both file and console logging are enabled for real-time monitoring.
|
||||
|
||||
"""
|
||||
@@ -233,7 +233,7 @@ local = salt.client.LocalClient()
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
file_handler = logging.FileHandler('/opt/so/log/salt/so-salt-cloud.log')
|
||||
file_handler = logging.FileHandler('/opt/so/log/salt/so-salt-cloud')
|
||||
console_handler = logging.StreamHandler()
|
||||
|
||||
formatter = logging.Formatter('%(asctime)s %(message)s')
|
||||
|
||||
@@ -14,7 +14,7 @@ sool9_{{host}}:
|
||||
private_key: /etc/ssh/auth_keys/soqemussh/id_ecdsa
|
||||
sudo: True
|
||||
deploy_command: sh /tmp/.saltcloud-*/deploy.sh
|
||||
script_args: -r -F -x python3 stable 3006.9
|
||||
script_args: -r -F -x python3 stable {{ SALTVERSION }}
|
||||
minion:
|
||||
master: {{ grains.host }}
|
||||
master_port: 4506
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
{% if '.'.join(sls.split('.')[:2]) in allowed_states %}
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||
{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
|
||||
{% from 'salt/map.jinja' import SALTVERSION %}
|
||||
|
||||
{% if HYPERVISORS %}
|
||||
cloud_providers:
|
||||
@@ -20,7 +21,7 @@ cloud_providers:
|
||||
- name: /etc/salt/cloud.providers.d/libvirt.conf
|
||||
- source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
|
||||
- defaults:
|
||||
HYPERVISORS: {{HYPERVISORS}}
|
||||
HYPERVISORS: {{ HYPERVISORS }}
|
||||
- template: jinja
|
||||
- makedirs: True
|
||||
|
||||
@@ -29,11 +30,17 @@ cloud_profiles:
|
||||
- name: /etc/salt/cloud.profiles.d/socloud.conf
|
||||
- source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
|
||||
- defaults:
|
||||
HYPERVISORS: {{HYPERVISORS}}
|
||||
HYPERVISORS: {{ HYPERVISORS }}
|
||||
MANAGERHOSTNAME: {{ grains.host }}
|
||||
MANAGERIP: {{ pillar.host.mainip }}
|
||||
SALTVERSION: {{ SALTVERSION }}
|
||||
- template: jinja
|
||||
- makedirs: True
|
||||
{% else %}
|
||||
no_hypervisors_configured:
|
||||
test.succeed_without_changes:
|
||||
- name: no_hypervisors_configured
|
||||
- comment: No hypervisors are configured
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
@@ -117,7 +117,7 @@ Exit Codes:
|
||||
4: VM provisioning failure (so-salt-cloud execution failed)
|
||||
|
||||
Logging:
|
||||
Log files are written to /opt/so/log/salt/engines/virtual_node_manager.log
|
||||
Log files are written to /opt/so/log/salt/engines/virtual_node_manager
|
||||
Comprehensive logging includes:
|
||||
- Hardware validation details
|
||||
- PCI ID conversion process
|
||||
@@ -138,23 +138,49 @@ import pwd
|
||||
import grp
|
||||
import salt.config
|
||||
import salt.runner
|
||||
import salt.client
|
||||
from typing import Dict, List, Optional, Tuple, Any
|
||||
from datetime import datetime, timedelta
|
||||
from threading import Lock
|
||||
|
||||
# Get socore uid/gid
|
||||
SOCORE_UID = pwd.getpwnam('socore').pw_uid
|
||||
SOCORE_GID = grp.getgrnam('socore').gr_gid
|
||||
|
||||
# Initialize Salt runner once
|
||||
# Initialize Salt runner and local client once
|
||||
opts = salt.config.master_config('/etc/salt/master')
|
||||
opts['output'] = 'json'
|
||||
runner = salt.runner.RunnerClient(opts)
|
||||
local = salt.client.LocalClient()
|
||||
|
||||
# Get socore uid/gid for file ownership
|
||||
SOCORE_UID = pwd.getpwnam('socore').pw_uid
|
||||
SOCORE_GID = grp.getgrnam('socore').gr_gid
|
||||
|
||||
# Configure logging
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(logging.DEBUG)
|
||||
|
||||
# Prevent propagation to parent loggers to avoid duplicate log entries
|
||||
log.propagate = False
|
||||
|
||||
# Add file handler for dedicated log file
|
||||
log_dir = '/opt/so/log/salt'
|
||||
log_file = os.path.join(log_dir, 'virtual_node_manager')
|
||||
|
||||
# Create log directory if it doesn't exist
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
|
||||
# Create file handler
|
||||
file_handler = logging.FileHandler(log_file)
|
||||
file_handler.setLevel(logging.DEBUG)
|
||||
|
||||
# Create formatter
|
||||
formatter = logging.Formatter(
|
||||
'%(asctime)s [%(name)s:%(lineno)d][%(levelname)-8s][%(process)d] %(message)s',
|
||||
datefmt='%Y-%m-%d %H:%M:%S'
|
||||
)
|
||||
file_handler.setFormatter(formatter)
|
||||
|
||||
# Add handler to logger
|
||||
log.addHandler(file_handler)
|
||||
|
||||
# Constants
|
||||
DEFAULT_INTERVAL = 30
|
||||
DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts'
|
||||
@@ -203,6 +229,39 @@ def write_json_file(file_path: str, data: Any) -> None:
|
||||
except Exception as e:
|
||||
log.error("Failed to write JSON file %s: %s", file_path, str(e))
|
||||
raise
|
||||
def remove_vm_from_vms_file(vms_file_path: str, vm_hostname: str, vm_role: str) -> bool:
|
||||
"""
|
||||
Remove a VM entry from the hypervisorVMs file.
|
||||
|
||||
Args:
|
||||
vms_file_path: Path to the hypervisorVMs file
|
||||
vm_hostname: Hostname of the VM to remove (without role suffix)
|
||||
vm_role: Role of the VM
|
||||
|
||||
Returns:
|
||||
bool: True if VM was removed, False otherwise
|
||||
"""
|
||||
try:
|
||||
# Read current VMs
|
||||
vms = read_json_file(vms_file_path)
|
||||
|
||||
# Find and remove the VM entry
|
||||
original_count = len(vms)
|
||||
vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]
|
||||
|
||||
if len(vms) < original_count:
|
||||
# VM was found and removed, write back to file
|
||||
write_json_file(vms_file_path, vms)
|
||||
log.info("Removed VM %s_%s from %s", vm_hostname, vm_role, vms_file_path)
|
||||
return True
|
||||
else:
|
||||
log.warning("VM %s_%s not found in %s", vm_hostname, vm_role, vms_file_path)
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
log.error("Failed to remove VM %s_%s from %s: %s", vm_hostname, vm_role, vms_file_path, str(e))
|
||||
return False
|
||||
|
||||
|
||||
def read_yaml_file(file_path: str) -> dict:
|
||||
"""Read and parse a YAML file."""
|
||||
@@ -558,6 +617,13 @@ def mark_vm_failed(vm_file: str, error_code: int, message: str) -> None:
|
||||
# Remove the original file since we'll create an error file
|
||||
os.remove(vm_file)
|
||||
|
||||
# Clear hardware resource claims so failed VMs don't consume resources
|
||||
# Keep nsm_size for reference but clear cpu, memory, sfp, copper
|
||||
config.pop('cpu', None)
|
||||
config.pop('memory', None)
|
||||
config.pop('sfp', None)
|
||||
config.pop('copper', None)
|
||||
|
||||
# Create error file
|
||||
error_file = f"{vm_file}.error"
|
||||
data = {
|
||||
@@ -586,8 +652,16 @@ def mark_invalid_hardware(hypervisor_path: str, vm_name: str, config: dict, erro
|
||||
# Join all messages with proper sentence structure
|
||||
full_message = "Hardware validation failure: " + " ".join(error_messages)
|
||||
|
||||
# Clear hardware resource claims so failed VMs don't consume resources
|
||||
# Keep nsm_size for reference but clear cpu, memory, sfp, copper
|
||||
config_copy = config.copy()
|
||||
config_copy.pop('cpu', None)
|
||||
config_copy.pop('memory', None)
|
||||
config_copy.pop('sfp', None)
|
||||
config_copy.pop('copper', None)
|
||||
|
||||
data = {
|
||||
'config': config,
|
||||
'config': config_copy,
|
||||
'status': 'error',
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'error_details': {
|
||||
@@ -634,6 +708,61 @@ def validate_vrt_license() -> bool:
|
||||
log.error("Error reading license file: %s", str(e))
|
||||
return False
|
||||
|
||||
def check_hypervisor_disk_space(hypervisor: str, size_gb: int) -> Tuple[bool, Optional[str]]:
|
||||
"""
|
||||
Check if hypervisor has sufficient disk space for volume creation.
|
||||
|
||||
Args:
|
||||
hypervisor: Hypervisor hostname
|
||||
size_gb: Required size in GB
|
||||
|
||||
Returns:
|
||||
Tuple of (has_space, error_message)
|
||||
"""
|
||||
try:
|
||||
# Get hypervisor minion ID
|
||||
hypervisor_minion = f"{hypervisor}_hypervisor"
|
||||
|
||||
# Check disk space on /nsm/libvirt/volumes using LocalClient
|
||||
result = local.cmd(
|
||||
hypervisor_minion,
|
||||
'cmd.run',
|
||||
["df -BG /nsm/libvirt/volumes | tail -1 | awk '{print $4}' | sed 's/G//'"]
|
||||
)
|
||||
|
||||
if not result or hypervisor_minion not in result:
|
||||
log.error("Failed to check disk space on hypervisor %s", hypervisor)
|
||||
return False, "Failed to check disk space on hypervisor"
|
||||
|
||||
available_gb_str = result[hypervisor_minion].strip()
|
||||
if not available_gb_str:
|
||||
log.error("Empty disk space response from hypervisor %s", hypervisor)
|
||||
return False, "Failed to get disk space information"
|
||||
|
||||
try:
|
||||
available_gb = float(available_gb_str)
|
||||
except ValueError:
|
||||
log.error("Invalid disk space value from hypervisor %s: %s", hypervisor, available_gb_str)
|
||||
return False, f"Invalid disk space value: {available_gb_str}"
|
||||
|
||||
# Add 10% buffer for filesystem overhead
|
||||
required_gb = size_gb * 1.1
|
||||
|
||||
log.debug("Hypervisor %s disk space check: Available=%.2fGB, Required=%.2fGB",
|
||||
hypervisor, available_gb, required_gb)
|
||||
|
||||
if available_gb < required_gb:
|
||||
error_msg = f"Insufficient disk space on hypervisor {hypervisor}. Available: {available_gb:.2f}GB, Required: {required_gb:.2f}GB (including 10% overhead)"
|
||||
log.error(error_msg)
|
||||
return False, error_msg
|
||||
|
||||
log.info("Hypervisor %s has sufficient disk space for %dGB volume", hypervisor, size_gb)
|
||||
return True, None
|
||||
|
||||
except Exception as e:
|
||||
log.error("Error checking disk space on hypervisor %s: %s", hypervisor, str(e))
|
||||
return False, f"Error checking disk space: {str(e)}"
|
||||
|
||||
def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
|
||||
"""
|
||||
Process a single VM creation request.
|
||||
@@ -695,6 +824,33 @@ def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
|
||||
log.warning("VM: %s - Both disk and nsm_size specified. disk takes precedence, nsm_size will be ignored.",
|
||||
vm_name)
|
||||
|
||||
# Check disk space BEFORE creating VM if nsm_size is specified
|
||||
if has_nsm_size and not has_disk:
|
||||
size_gb = int(vm_config['nsm_size'])
|
||||
has_space, space_error = check_hypervisor_disk_space(hypervisor, size_gb)
|
||||
if not has_space:
|
||||
log.error("VM: %s - %s", vm_name, space_error)
|
||||
|
||||
# Send Hypervisor NSM Disk Full status event
|
||||
try:
|
||||
subprocess.run([
|
||||
'so-salt-emit-vm-deployment-status-event',
|
||||
'-v', vm_name,
|
||||
'-H', hypervisor,
|
||||
'-s', 'Hypervisor NSM Disk Full'
|
||||
], check=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
log.error("Failed to emit volume create failed event for %s: %s", vm_name, str(e))
|
||||
|
||||
mark_invalid_hardware(
|
||||
hypervisor_path,
|
||||
vm_name,
|
||||
vm_config,
|
||||
{'disk_space': f"Insufficient disk space for {size_gb}GB volume: {space_error}"}
|
||||
)
|
||||
return
|
||||
log.debug("VM: %s - Hypervisor has sufficient space for %dGB volume", vm_name, size_gb)
|
||||
|
||||
# Initial hardware validation against model
|
||||
is_valid, errors = validate_hardware_request(model_config, vm_config)
|
||||
if not is_valid:
|
||||
@@ -967,12 +1123,21 @@ def process_hypervisor(hypervisor_path: str) -> None:
|
||||
if not nodes_config:
|
||||
log.debug("Empty VMs configuration in %s", vms_file)
|
||||
|
||||
# Get existing VMs
|
||||
# Get existing VMs and track failed VMs separately
|
||||
existing_vms = set()
|
||||
failed_vms = set() # VMs with .error files
|
||||
for file_path in glob.glob(os.path.join(hypervisor_path, '*_*')):
|
||||
basename = os.path.basename(file_path)
|
||||
# Skip error and status files
|
||||
if not basename.endswith('.error') and not basename.endswith('.status'):
|
||||
# Skip status files
|
||||
if basename.endswith('.status'):
|
||||
continue
|
||||
# Track VMs with .error files separately
|
||||
if basename.endswith('.error'):
|
||||
vm_name = basename[:-6] # Remove '.error' suffix
|
||||
failed_vms.add(vm_name)
|
||||
existing_vms.add(vm_name) # Also add to existing to prevent recreation
|
||||
log.debug(f"Found failed VM with .error file: {vm_name}")
|
||||
else:
|
||||
existing_vms.add(basename)
|
||||
|
||||
# Process new VMs
|
||||
@@ -989,12 +1154,37 @@ def process_hypervisor(hypervisor_path: str) -> None:
|
||||
# process_vm_creation handles its own locking
|
||||
process_vm_creation(hypervisor_path, vm_config)
|
||||
|
||||
# Process VM deletions
|
||||
# Process VM deletions (but skip failed VMs that only have .error files)
|
||||
vms_to_delete = existing_vms - configured_vms
|
||||
log.debug(f"Existing VMs: {existing_vms}")
|
||||
log.debug(f"Configured VMs: {configured_vms}")
|
||||
log.debug(f"Failed VMs: {failed_vms}")
|
||||
log.debug(f"VMs to delete: {vms_to_delete}")
|
||||
for vm_name in vms_to_delete:
|
||||
# Skip deletion if VM only has .error file (no actual VM to delete)
|
||||
if vm_name in failed_vms:
|
||||
error_file = os.path.join(hypervisor_path, f"{vm_name}.error")
|
||||
base_file = os.path.join(hypervisor_path, vm_name)
|
||||
# Only skip if there's no base file (VM never successfully created)
|
||||
if not os.path.exists(base_file):
|
||||
log.info(f"Skipping deletion of failed VM {vm_name} (VM never successfully created)")
|
||||
# Clean up the .error and .status files since VM is no longer configured
|
||||
if os.path.exists(error_file):
|
||||
os.remove(error_file)
|
||||
log.info(f"Removed .error file for unconfigured VM: {vm_name}")
|
||||
status_file = os.path.join(hypervisor_path, f"{vm_name}.status")
|
||||
if os.path.exists(status_file):
|
||||
os.remove(status_file)
|
||||
log.info(f"Removed .status file for unconfigured VM: {vm_name}")
|
||||
|
||||
# Trigger hypervisor annotation update to reflect the removal
|
||||
try:
|
||||
log.info(f"Triggering hypervisor annotation update after removing failed VM: {vm_name}")
|
||||
runner.cmd('state.orch', ['orch.dyanno_hypervisor'])
|
||||
except Exception as e:
|
||||
log.error(f"Failed to trigger hypervisor annotation update for {vm_name}: {str(e)}")
|
||||
|
||||
continue
|
||||
log.info(f"Initiating deletion process for VM: {vm_name}")
|
||||
process_vm_deletion(hypervisor_path, vm_name)
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
|
||||
salt:
|
||||
master:
|
||||
version: '3006.9'
|
||||
version: '3006.16'
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
|
||||
salt:
|
||||
minion:
|
||||
version: '3006.9'
|
||||
version: '3006.16'
|
||||
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
|
||||
|
||||
@@ -5,6 +5,12 @@ sensoroni:
|
||||
enabled: False
|
||||
timeout_ms: 900000
|
||||
parallel_limit: 5
|
||||
export:
|
||||
timeout_ms: 1200000
|
||||
cache_refresh_interval_ms: 10000
|
||||
export_metric_limit: 10000
|
||||
export_event_limit: 10000
|
||||
csv_separator: ','
|
||||
node_checkin_interval_ms: 10000
|
||||
sensoronikey:
|
||||
soc_host:
|
||||
|
||||
@@ -21,7 +21,13 @@
|
||||
},
|
||||
{%- endif %}
|
||||
"importer": {},
|
||||
"export": {},
|
||||
"export": {
|
||||
"timeoutMs": {{ SENSORONIMERGED.config.export.timeout_ms }},
|
||||
"cacheRefreshIntervalMs": {{ SENSORONIMERGED.config.export.cache_refresh_interval_ms }},
|
||||
"exportMetricLimit": {{ SENSORONIMERGED.config.export.export_metric_limit }},
|
||||
"exportEventLimit": {{ SENSORONIMERGED.config.export.export_event_limit }},
|
||||
"csvSeparator": "{{ SENSORONIMERGED.config.export.csv_separator }}"
|
||||
},
|
||||
"statickeyauth": {
|
||||
"apiKey": "{{ GLOBALS.sensoroni_key }}"
|
||||
{% if GLOBALS.is_sensor %}
|
||||
|
||||
@@ -17,6 +17,27 @@ sensoroni:
|
||||
description: Parallel limit for the analyzer.
|
||||
advanced: True
|
||||
helpLink: cases.html
|
||||
export:
|
||||
timeout_ms:
|
||||
description: Timeout period for the exporter to finish export-related tasks.
|
||||
advanced: True
|
||||
helpLink: reports.html
|
||||
cache_refresh_interval_ms:
|
||||
description: Refresh interval for cache updates. Longer intervals result in less compute usage but risks stale data included in reports.
|
||||
advanced: True
|
||||
helpLink: reports.html
|
||||
export_metric_limit:
|
||||
description: Maximum number of metric values to include in each metric aggregation group.
|
||||
advanced: True
|
||||
helpLink: reports.html
|
||||
export_event_limit:
|
||||
description: Maximum number of events to include per event list.
|
||||
advanced: True
|
||||
helpLink: reports.html
|
||||
csv_separator:
|
||||
description: Separator character to use for CSV exports.
|
||||
advanced: False
|
||||
helpLink: reports.html
|
||||
node_checkin_interval_ms:
|
||||
description: Interval in ms to checkin to the soc_host.
|
||||
advanced: True
|
||||
|
||||
@@ -1494,6 +1494,8 @@ soc:
|
||||
assistant:
|
||||
apiUrl: https://onionai.securityonion.net
|
||||
healthTimeoutSeconds: 3
|
||||
systemPromptAddendum: ""
|
||||
systemPromptAddendumMaxLength: 50000
|
||||
salt:
|
||||
queueDir: /opt/sensoroni/queue
|
||||
timeoutMs: 45000
|
||||
@@ -1636,6 +1638,9 @@ soc:
|
||||
- name: socExcludeToggle
|
||||
filter: 'NOT event.module:"soc"'
|
||||
enabled: true
|
||||
- name: onionaiExcludeToggle
|
||||
filter: 'NOT _index:"*:so-assistant-*"'
|
||||
enabled: true
|
||||
queries:
|
||||
- name: Default Query
|
||||
description: Show all events grouped by the observer host
|
||||
@@ -2547,9 +2552,22 @@ soc:
|
||||
assistant:
|
||||
enabled: false
|
||||
investigationPrompt: Investigate Alert ID {socId}
|
||||
contextLimitSmall: 200000
|
||||
contextLimitLarge: 1000000
|
||||
thresholdColorRatioLow: 0.5
|
||||
thresholdColorRatioMed: 0.75
|
||||
thresholdColorRatioMax: 1
|
||||
availableModels:
|
||||
- id: sonnet-4
|
||||
displayName: Claude Sonnet 4
|
||||
contextLimitSmall: 200000
|
||||
contextLimitLarge: 1000000
|
||||
lowBalanceColorAlert: 500000
|
||||
- id: sonnet-4.5
|
||||
displayName: Claude Sonnet 4.5
|
||||
contextLimitSmall: 200000
|
||||
contextLimitLarge: 1000000
|
||||
lowBalanceColorAlert: 500000
|
||||
- id: gptoss-120b
|
||||
displayName: GPT-OSS 120B
|
||||
contextLimitSmall: 128000
|
||||
contextLimitLarge: 128000
|
||||
lowBalanceColorAlert: 500000
|
||||
@@ -3,11 +3,14 @@
|
||||
{# Define the list of process steps in order (case-sensitive) #}
|
||||
{% set PROCESS_STEPS = [
|
||||
'Processing',
|
||||
'Hypervisor NSM Disk Full',
|
||||
'IP Configuration',
|
||||
'Starting Create',
|
||||
'Executing Deploy Script',
|
||||
'Initialize Minion Pillars',
|
||||
'Created Instance',
|
||||
'Volume Creation',
|
||||
'Volume Configuration',
|
||||
'Hardware Configuration',
|
||||
'Highstate Initiated',
|
||||
'Destroyed Instance'
|
||||
|
||||
51
salt/soc/dyanno/hypervisor/remove_failed_vm.sls
Normal file
51
salt/soc/dyanno/hypervisor/remove_failed_vm.sls
Normal file
@@ -0,0 +1,51 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
#
|
||||
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||
#
|
||||
# "You may not move, change, disable, or circumvent the license key functionality
|
||||
# in the software, and you may not remove or obscure any functionality in the
|
||||
# software that is protected by the license key."
|
||||
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||
|
||||
{% do salt.log.info('soc/dyanno/hypervisor/remove_failed_vm: Running') %}
|
||||
{% set vm_name = pillar.get('vm_name') %}
|
||||
{% set hypervisor = pillar.get('hypervisor') %}
|
||||
|
||||
{% if vm_name and hypervisor %}
|
||||
{% set vm_parts = vm_name.split('_') %}
|
||||
{% if vm_parts | length >= 2 %}
|
||||
{% set vm_role = vm_parts[-1] %}
|
||||
{% set vm_hostname = '_'.join(vm_parts[:-1]) %}
|
||||
{% set vms_file = '/opt/so/saltstack/local/salt/hypervisor/hosts/' ~ hypervisor ~ 'VMs' %}
|
||||
|
||||
{% do salt.log.info('soc/dyanno/hypervisor/remove_failed_vm: Removing VM ' ~ vm_name ~ ' from ' ~ vms_file) %}
|
||||
|
||||
remove_vm_{{ vm_name }}_from_vms_file:
|
||||
module.run:
|
||||
- name: hypervisor.remove_vm_from_vms_file
|
||||
- vms_file_path: {{ vms_file }}
|
||||
- vm_hostname: {{ vm_hostname }}
|
||||
- vm_role: {{ vm_role }}
|
||||
|
||||
{% else %}
|
||||
{% do salt.log.error('soc/dyanno/hypervisor/remove_failed_vm: Invalid vm_name format: ' ~ vm_name) %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{% do salt.log.error('soc/dyanno/hypervisor/remove_failed_vm: Missing required pillar data (vm_name or hypervisor)') %}
|
||||
{% endif %}
|
||||
|
||||
{% do salt.log.info('soc/dyanno/hypervisor/remove_failed_vm: Completed') %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{% do salt.log.error(
|
||||
'Hypervisor nodes are a feature supported only for customers with a valid license. '
|
||||
'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com '
|
||||
'for more information about purchasing a license to enable this feature.'
|
||||
) %}
|
||||
|
||||
{% endif %}
|
||||
@@ -13,7 +13,6 @@
|
||||
|
||||
{%- import_yaml 'soc/dyanno/hypervisor/hypervisor.yaml' as ANNOTATION -%}
|
||||
{%- from 'hypervisor/map.jinja' import HYPERVISORS -%}
|
||||
{%- from 'soc/dyanno/hypervisor/map.jinja' import PROCESS_STEPS -%}
|
||||
|
||||
{%- set TEMPLATE = ANNOTATION.hypervisor.hosts.pop('defaultHost') -%}
|
||||
|
||||
@@ -27,7 +26,6 @@
|
||||
{%- if baseDomainStatus == 'Initialized' %}
|
||||
{%- if vm_list %}
|
||||
#### Virtual Machines
|
||||
Status values: {% for step in PROCESS_STEPS %}{{ step }}{% if not loop.last %}, {% endif %}{% endfor %}. "Last Updated" shows when status changed. After "Highstate Initiated", only "Destroyed Instance" updates the timestamp.
|
||||
|
||||
| Name | Status | CPU Cores | Memory (GB)| Disk | Copper | SFP | Last Updated |
|
||||
|--------------------|--------------------|-----------|------------|------|--------|------|---------------------|
|
||||
@@ -42,7 +40,6 @@ Status values: {% for step in PROCESS_STEPS %}{{ step }}{% if not loop.last %},
|
||||
{%- endfor %}
|
||||
{%- else %}
|
||||
#### Virtual Machines
|
||||
Status values: {% for step in PROCESS_STEPS %}{{ step }}{% if not loop.last %}, {% endif %}{% endfor %}. "Last Updated" shows when status changed. After "Highstate Initiated", only "Destroyed Instance" updates the timestamp.
|
||||
|
||||
No Virtual Machines Found
|
||||
{%- endif %}
|
||||
@@ -96,9 +93,21 @@ Base domain has not been initialized.
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
|
||||
{# Calculate available resources #}
|
||||
{%- set cpu_free = hw_config.cpu - ns.used_cpu -%}
|
||||
{%- set mem_free = hw_config.memory - ns.used_memory -%}
|
||||
{# Determine host OS overhead based on role #}
|
||||
{%- if role == 'hypervisor' -%}
|
||||
{%- set host_os_cpu = 8 -%}
|
||||
{%- set host_os_memory = 16 -%}
|
||||
{%- elif role == 'managerhype' -%}
|
||||
{%- set host_os_cpu = 16 -%}
|
||||
{%- set host_os_memory = 32 -%}
|
||||
{%- else -%}
|
||||
{%- set host_os_cpu = 0 -%}
|
||||
{%- set host_os_memory = 0 -%}
|
||||
{%- endif -%}
|
||||
|
||||
{# Calculate available resources (subtract both VM usage and host OS overhead) #}
|
||||
{%- set cpu_free = hw_config.cpu - ns.used_cpu - host_os_cpu -%}
|
||||
{%- set mem_free = hw_config.memory - ns.used_memory - host_os_memory -%}
|
||||
|
||||
{# Get used PCI indices #}
|
||||
{%- set used_disk = [] -%}
|
||||
|
||||
@@ -589,6 +589,15 @@ soc:
|
||||
description: Timeout in seconds for the Onion AI health check.
|
||||
global: True
|
||||
advanced: True
|
||||
systemPromptAddendum:
|
||||
description: Additional context to provide to the AI assistant about this SOC deployment. This can include information about your environment, policies, or any other relevant details that can help the AI provide more accurate and tailored assistance. Long prompts may be shortened.
|
||||
global: True
|
||||
advanced: False
|
||||
multiline: True
|
||||
systemPromptAddendumMaxLength:
|
||||
description: Maximum length of the system prompt addendum. Longer prompts will be truncated.
|
||||
global: True
|
||||
advanced: True
|
||||
client:
|
||||
assistant:
|
||||
enabled:
|
||||
@@ -597,14 +606,6 @@ soc:
|
||||
investigationPrompt:
|
||||
description: Prompt given to Onion AI when beginning an investigation.
|
||||
global: True
|
||||
contextLimitSmall:
|
||||
description: Smaller context limit for Onion AI.
|
||||
global: True
|
||||
advanced: True
|
||||
contextLimitLarge:
|
||||
description: Larger context limit for Onion AI.
|
||||
global: True
|
||||
advanced: True
|
||||
thresholdColorRatioLow:
|
||||
description: Lower visual context color change threshold.
|
||||
global: True
|
||||
@@ -621,6 +622,32 @@ soc:
|
||||
description: Onion AI credit amount at which balance turns red.
|
||||
global: True
|
||||
advanced: True
|
||||
availableModels:
|
||||
description: List of AI models available for use in SOC as well as model specific warning thresholds.
|
||||
global: True
|
||||
advanced: True
|
||||
forcedType: "[]{}"
|
||||
helpLink: assistant.html
|
||||
syntax: json
|
||||
uiElements:
|
||||
- field: id
|
||||
label: Model ID
|
||||
required: True
|
||||
- field: displayName
|
||||
label: Display Name
|
||||
required: True
|
||||
- field: contextLimitSmall
|
||||
label: Context Limit (Small)
|
||||
forcedType: int
|
||||
required: True
|
||||
- field: contextLimitLarge
|
||||
label: Context Limit (Large)
|
||||
forcedType: int
|
||||
required: True
|
||||
- field: lowBalanceColorAlert
|
||||
label: Low Balance Color Alert
|
||||
forcedType: int
|
||||
required: True
|
||||
apiTimeoutMs:
|
||||
description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
|
||||
global: True
|
||||
|
||||
@@ -81,7 +81,7 @@
|
||||
|
||||
set -e
|
||||
|
||||
LOG_FILE="/opt/so/log/so-nsm-mount-nvme.log"
|
||||
LOG_FILE="/opt/so/log/so-nsm-mount-nvme"
|
||||
VG_NAME=""
|
||||
LV_NAME="nsm"
|
||||
MOUNT_POINT="/nsm"
|
||||
|
||||
@@ -55,11 +55,11 @@
|
||||
# - Mount operation failed
|
||||
#
|
||||
# Logging:
|
||||
# - All operations logged to /opt/so/log/so-nsm-mount-virtio.log
|
||||
# - All operations logged to /opt/so/log/so-nsm-mount-virtio
|
||||
|
||||
set -e
|
||||
|
||||
LOG_FILE="/opt/so/log/so-nsm-mount-virtio.log"
|
||||
LOG_FILE="/opt/so/log/so-nsm-mount-virtio"
|
||||
DEVICE="/dev/vdb"
|
||||
MOUNT_POINT="/nsm"
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ include:
|
||||
|
||||
strelka_filestream:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-filestream:{{ GLOBALS.so_version }}
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
|
||||
- binds:
|
||||
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
|
||||
- /nsm/strelka:/nsm/strelka
|
||||
|
||||
@@ -14,7 +14,7 @@ include:
|
||||
|
||||
strelka_frontend:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-frontend:{{ GLOBALS.so_version }}
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
|
||||
- binds:
|
||||
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
|
||||
- /nsm/strelka/log/:/var/log/strelka/:rw
|
||||
|
||||
@@ -337,4 +337,5 @@
|
||||
]
|
||||
data_format = "influx"
|
||||
interval = "1h"
|
||||
timeout = "120s"
|
||||
{%- endif %}
|
||||
|
||||
@@ -1646,6 +1646,12 @@ reserve_ports() {
|
||||
fi
|
||||
}
|
||||
|
||||
clear_previous_setup_results() {
|
||||
# Disregard previous setup outcomes.
|
||||
rm -f /root/failure
|
||||
rm -f /root/success
|
||||
}
|
||||
|
||||
reinstall_init() {
|
||||
info "Putting system in state to run setup again"
|
||||
|
||||
@@ -1657,10 +1663,6 @@ reinstall_init() {
|
||||
|
||||
local service_retry_count=20
|
||||
|
||||
# Disregard previous install outcomes
|
||||
rm -f /root/failure
|
||||
rm -f /root/success
|
||||
|
||||
{
|
||||
# remove all of root's cronjobs
|
||||
logCmd "crontab -r -u root"
|
||||
@@ -2305,7 +2307,7 @@ set_redirect() {
|
||||
|
||||
set_timezone() {
|
||||
|
||||
logCmd "timedatectl set-timezone Etc/UTC"
|
||||
timedatectl set-timezone Etc/UTC
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -132,6 +132,10 @@ if [[ -f /root/accept_changes ]]; then
|
||||
reset_proxy
|
||||
fi
|
||||
|
||||
# Previous setup attempts, even if setup doesn't actually start the installation,
|
||||
# can leave behind results that may interfere with the current setup attempt.
|
||||
clear_previous_setup_results
|
||||
|
||||
title "Parsing Username for Install"
|
||||
parse_install_username
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ log_has_errors() {
|
||||
grep -vE "Command failed with exit code" | \
|
||||
grep -vE "Running scope as unit" | \
|
||||
grep -vE "securityonion-resources/sigma/stable" | \
|
||||
grep -vE "remove_failed_vm.sls" | \
|
||||
grep -vE "log-.*-pipeline_failed_attempts" &> "$error_log"
|
||||
|
||||
if [[ $? -eq 0 ]]; then
|
||||
|
||||
BIN
sigs/securityonion-2.4.190-20251024.iso.sig
Normal file
BIN
sigs/securityonion-2.4.190-20251024.iso.sig
Normal file
Binary file not shown.
Reference in New Issue
Block a user