Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

Merge pull request #15122 from Security-Onion-Solutions/amv

nsm virtual disk and new nsm_total grain
@@ -7,12 +7,14 @@
 """
 Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
-for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
-It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.
+for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
+creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
+so-kvm-modify-hardware, and so-kvm-create-volume scripts.

-The module offers two main capabilities:
+The module offers three main capabilities:
 1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
 2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
+3. Volume Management: Create and attach virtual storage volumes for NSM data

 This module is intended to work with Security Onion's virtualization infrastructure and is typically
 used in conjunction with salt-cloud for VM provisioning and management.
@@ -244,3 +246,90 @@ def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False
     except Exception as e:
         log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
         raise
+
+def create_volume_config(vm_name, size_gb, start=False):
+    '''
+    Usage:
+        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]
+
+    Options:
+        vm_name
+            Name of the virtual machine to attach the volume to
+        size_gb
+            Volume size in GB (positive integer)
+            This determines the capacity of the virtual storage volume
+        start
+            Boolean flag to start the VM after volume creation
+            Optional - defaults to False
+
+    Examples:
+        1. **Create 500GB Volume:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
+           ```
+           This creates a 500GB virtual volume for NSM storage
+
+        2. **Create 1TB Volume and Start VM:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
+           ```
+           This creates a 1TB volume and starts the VM after attachment
+
+    Notes:
+        - VM must be stopped before volume creation
+        - Volume is created as a pre-allocated raw disk image and attached to the VM
+        - This is an alternative to disk passthrough via modify_hardware_config
+        - Volume is automatically attached to the VM's libvirt configuration
+        - Requires the so-kvm-create-volume script to be installed
+        - Volume files are stored in the hypervisor's VM storage directory
+
+    Description:
+        This function creates and attaches a virtual storage volume to a KVM virtual machine
+        using the so-kvm-create-volume script. It creates a disk image of the specified
+        size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
+        This provides an alternative to physical disk passthrough, allowing flexible storage
+        allocation without requiring dedicated hardware. The VM can optionally be started
+        after the volume is successfully created and attached.
+
+    Exit Codes:
+        0: Success
+        1: Invalid parameters
+        2: VM state error (running when it should be stopped)
+        3: Volume creation error
+        4: System command error
+        255: Unexpected error
+
+    Logging:
+        - All operations are logged to the salt minion log
+        - Log entries are prefixed with 'qcow2 module:'
+        - Volume creation and attachment operations are logged
+        - Errors include detailed messages and stack traces
+        - Final status of volume creation is logged
+    '''
+
+    # Validate size_gb parameter
+    if not isinstance(size_gb, int) or size_gb <= 0:
+        raise ValueError('size_gb must be a positive integer.')
+
+    cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
+
+    if start:
+        cmd.append('-S')
+
+    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+        ret = {
+            'retcode': result.returncode,
+            'stdout': result.stdout,
+            'stderr': result.stderr
+        }
+        if result.returncode != 0:
+            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
+        else:
+            log.info('qcow2 module: Script executed successfully.')
+        return ret
+    except Exception as e:
+        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
+        raise
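
Because the function returns the script's retcode/stdout/stderr instead of raising on a non-zero exit, callers should check the result themselves. A minimal sketch from the manager shell; the target pattern and VM name below are illustrative:

```bash
# Create a 500GB NSM volume, then pull the script's exit code and any error
# output out of the returned dictionary (hv1_*, sensor1_sensor are examples).
sudo salt 'hv1_*' qcow2.create_volume_config vm_name=sensor1_sensor size_gb=500 --out=json \
    | jq '.[] | {retcode, stderr}'
```
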
salt/common/grains.sls (new file, 21 lines)
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}

nsm_total:
  grains.present:
    - name: nsm_total
    - value: {{ nsm_total }}

{% else %}

nsm_missing:
  test.succeed_without_changes:
    - name: /nsm does not exist, skipping grain assignment

{% endif %}
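
After this state runs, the grain can be read back from any minion; `df -BG` reports whole gibibytes with a trailing G, so the grain holds a string such as "500G". A quick check from the manager:

```bash
# Apply the new state and read the resulting grain back.
sudo salt '*' state.apply common.grains
sudo salt '*' grains.get nsm_total
```
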
@@ -4,6 +4,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 include:
+  - common.grains
   - common.packages
 {% if GLOBALS.role in GLOBALS.manager_roles %}
   - manager.elasticsearch # needed for elastic_curl_config state
@@ -20,7 +20,7 @@
 ],
 "data_stream.dataset": "import",
 "custom": "",
-"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.5.4\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.5.4\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.5.4\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
+"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
 "tags": [
   "import"
 ]
@@ -15,8 +15,21 @@ if ! is_manager_node; then
 fi

 function update_logstash_outputs() {
     # Generate updated JSON payload
-    JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
+    if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
+        SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
+        if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
+            JSON_STRING=$(jq -n \
+                --arg UPDATEDLIST "$NEW_LIST_JSON" \
+                --argjson SECRETS "$SECRETS" \
+                --argjson SSL_CONFIG "$SSL_CONFIG" \
+                '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+        else
+            JSON_STRING=$(jq -n \
+                --arg UPDATEDLIST "$NEW_LIST_JSON" \
+                --argjson SSL_CONFIG "$SSL_CONFIG" \
+                '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
+        fi
+    fi

     # Update Logstash Outputs
     curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
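
To confirm which branch was taken (with or without a secrets block), the same Fleet endpoint can be queried read-only; a sketch using the manager's existing curl config:

```bash
# Inspect the current grid-logstash output policy: prints the hosts list and
# whether 'ssl' and 'secrets' are populated (same endpoint the function uses).
curl -K /opt/so/conf/elasticsearch/curl.config -L \
    "http://localhost:5601/api/fleet/outputs/so-manager_logstash" 2>/dev/null \
    | jq '.item | {hosts, has_ssl: (.ssl != null), has_secrets: (.secrets != null)}'
```
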
@@ -127,7 +127,7 @@ JSON_STRING=$( jq -n \
     --arg LOGSTASHCRT "$LOGSTASHCRT" \
     --arg LOGSTASHKEY "$LOGSTASHKEY" \
     --arg LOGSTASHCA "$LOGSTASHCA" \
-    '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
+    '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
 )
 if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
     echo -e "\nFailed to create logstash fleet output"
@@ -1,6 +1,6 @@
 elasticsearch:
   enabled: false
-  version: 8.18.6
+  version: 8.18.8
   index_clean: true
   config:
     action:
@@ -30,7 +30,9 @@
 #
 # WARNING: This script will DESTROY all data on the target drives!
 #
-# USAGE: sudo ./so-nvme-raid1.sh
+# USAGE:
+#   sudo ./so-nvme-raid1.sh                   # Normal operation
+#   sudo ./so-nvme-raid1.sh --force-cleanup   # Force cleanup of existing RAID
 #
 #################################################################
@@ -41,6 +43,19 @@ set -e
 RAID_ARRAY_NAME="md0"
 RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
 MOUNT_POINT="/nsm"
+FORCE_CLEANUP=false
+
+# Parse command line arguments
+for arg in "$@"; do
+    case $arg in
+        --force-cleanup)
+            FORCE_CLEANUP=true
+            shift
+            ;;
+        *)
+            ;;
+    esac
+done

 # Function to log messages
 log() {
@@ -55,6 +70,91 @@ check_root() {
     fi
 }

+# Function to force cleanup all RAID components
+force_cleanup_raid() {
+    log "=== FORCE CLEANUP MODE ==="
+    log "This will destroy all RAID configurations and data on target drives!"
+
+    # Stop all MD arrays
+    log "Stopping all MD arrays"
+    mdadm --stop --scan 2>/dev/null || true
+
+    # Wait for arrays to stop
+    sleep 2
+
+    # Remove any running md devices
+    for md in /dev/md*; do
+        if [ -b "$md" ]; then
+            log "Stopping $md"
+            mdadm --stop "$md" 2>/dev/null || true
+        fi
+    done
+
+    # Force cleanup both NVMe drives
+    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
+        log "Force cleaning $device"
+
+        # Kill any processes using the device
+        fuser -k "${device}"* 2>/dev/null || true
+
+        # Unmount any mounted partitions
+        for part in "${device}"*; do
+            if [ -b "$part" ]; then
+                umount -f "$part" 2>/dev/null || true
+            fi
+        done
+
+        # Force zero RAID superblocks on partitions
+        for part in "${device}"p*; do
+            if [ -b "$part" ]; then
+                log "Zeroing RAID superblock on $part"
+                mdadm --zero-superblock --force "$part" 2>/dev/null || true
+            fi
+        done
+
+        # Zero superblock on the device itself
+        log "Zeroing RAID superblock on $device"
+        mdadm --zero-superblock --force "$device" 2>/dev/null || true
+
+        # Remove LVM physical volumes
+        pvremove -ff -y "$device" 2>/dev/null || true
+
+        # Wipe all filesystem and partition signatures
+        log "Wiping all signatures from $device"
+        wipefs -af "$device" 2>/dev/null || true
+
+        # Overwrite the beginning of the drive (partition table area)
+        log "Clearing partition table on $device"
+        dd if=/dev/zero of="$device" bs=1M count=10 2>/dev/null || true
+
+        # Clear the end of the drive (backup partition table area)
+        local device_size=$(blockdev --getsz "$device" 2>/dev/null || echo "0")
+        if [ "$device_size" -gt 0 ]; then
+            dd if=/dev/zero of="$device" bs=512 seek=$(( device_size - 2048 )) count=2048 2>/dev/null || true
+        fi
+
+        # Force kernel to re-read partition table
+        blockdev --rereadpt "$device" 2>/dev/null || true
+        partprobe -s "$device" 2>/dev/null || true
+    done
+
+    # Clear mdadm configuration
+    log "Clearing mdadm configuration"
+    echo "DEVICE partitions" > /etc/mdadm.conf
+
+    # Remove any fstab entries for the RAID device or mount point
+    log "Cleaning fstab entries"
+    sed -i "\|${RAID_DEVICE}|d" /etc/fstab
+    sed -i "\|${MOUNT_POINT}|d" /etc/fstab
+
+    # Wait for system to settle
+    udevadm settle
+    sleep 5
+
+    log "Force cleanup complete!"
+    log "Proceeding with RAID setup..."
+}
+
 # Function to find MD arrays using specific devices
 find_md_arrays_using_devices() {
     local target_devices=("$@")
@@ -205,10 +305,15 @@ check_existing_raid() {
         fi

         log "Error: $device appears to be part of an existing RAID array"
-        log "To reuse this device, you must first:"
-        log "1. Unmount any filesystems"
-        log "2. Stop the RAID array: mdadm --stop $array_name"
-        log "3. Zero the superblock: mdadm --zero-superblock ${device}p1"
+        log "Old RAID metadata detected but array is not running."
+        log ""
+        log "To fix this, run the script with --force-cleanup:"
+        log "  sudo $0 --force-cleanup"
+        log ""
+        log "Or manually clean up with:"
+        log "1. Stop any arrays: mdadm --stop --scan"
+        log "2. Zero superblocks: mdadm --zero-superblock --force ${device}p1"
+        log "3. Wipe signatures: wipefs -af $device"
         exit 1
     fi
 done
@@ -238,7 +343,7 @@ ensure_devices_free() {
     done

     # Clear MD superblock
-    mdadm --zero-superblock "${device}"* 2>/dev/null || true
+    mdadm --zero-superblock --force "${device}"* 2>/dev/null || true

     # Remove LVM PV if exists
     pvremove -ff -y "$device" 2>/dev/null || true
@@ -263,6 +368,11 @@ main() {
     # Check if running as root
     check_root

+    # If force cleanup flag is set, do aggressive cleanup first
+    if [ "$FORCE_CLEANUP" = true ]; then
+        force_cleanup_raid
+    fi
+
     # Check for existing RAID setup
     check_existing_raid
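
Before reaching for --force-cleanup, a read-only survey of what the kernel currently sees can confirm the stale-metadata diagnosis; the device names below match the script's hard-coded NVMe pair:

```bash
# Read-only RAID survey before a destructive cleanup.
cat /proc/mdstat                                         # any running md arrays
mdadm --examine /dev/nvme0n1 /dev/nvme1n1 2>/dev/null    # leftover superblocks
lsblk -o NAME,SIZE,TYPE,MOUNTPOINT /dev/nvme0n1 /dev/nvme1n1
grep -E "md0|/nsm" /etc/fstab                            # stale entries the cleanup removes
```
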
salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume (new file, 586 lines)
@@ -0,0 +1,586 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% if 'vrt' in salt['pillar.get']('features', []) %}

"""
Script for creating and attaching virtual volumes to KVM virtual machines for NSM storage.
This script provides functionality to create pre-allocated raw disk images and attach them
to VMs as virtio-blk devices for high-performance network security monitoring data storage.

The script handles the complete volume lifecycle:
1. Volume Creation: Creates pre-allocated raw disk images using qemu-img
2. Volume Attachment: Attaches volumes to VMs as virtio-blk devices
3. VM Management: Stops/starts VMs as needed during the process

This script is designed to work with Security Onion's virtualization infrastructure and is typically
used during VM provisioning to add dedicated NSM storage volumes.

**Usage:**
    so-kvm-create-volume -v <vm_name> -s <size_gb> [-S]

**Options:**
    -v, --vm     Name of the virtual machine to attach the volume to (required).
    -s, --size   Size of the volume in GB (required, must be a positive integer).
    -S, --start  Start the VM after volume creation and attachment (optional).

**Examples:**

1. **Create and Attach 500GB Volume:**

   ```bash
   so-kvm-create-volume -v vm1_sensor -s 500
   ```

   This command creates and attaches a volume with the following settings:
   - VM Name: `vm1_sensor`
   - Volume Size: `500` GB
   - Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm.img`
   - Device: `/dev/vdb` (virtio-blk)
   - VM remains stopped after attachment

2. **Create Volume and Start VM:**

   ```bash
   so-kvm-create-volume -v vm2_sensor -s 1000 -S
   ```

   This command creates a volume and starts the VM:
   - VM Name: `vm2_sensor`
   - Volume Size: `1000` GB (1 TB)
   - VM is started after volume attachment due to the `-S` flag

3. **Create Large Volume for Heavy Traffic:**

   ```bash
   so-kvm-create-volume -v vm3_sensor -s 2000 -S
   ```

   This command creates a large volume for high-traffic environments:
   - VM Name: `vm3_sensor`
   - Volume Size: `2000` GB (2 TB)
   - VM is started after attachment

**Notes:**

- The script automatically stops the VM if it's running before creating and attaching the volume.
- Volumes are created with full pre-allocation for optimal performance.
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm.img`.
- Volumes are attached as `/dev/vdb` using virtio-blk for high performance.
- The script checks available disk space before creating the volume.
- Ownership is set to `socore:socore` with permissions `644`.
- Without the `-S` flag, the VM remains stopped after volume attachment.

**Description:**

The `so-kvm-create-volume` script creates and attaches NSM storage volumes using the following process:

1. **Pre-flight Checks:**
   - Validates input parameters (VM name, size)
   - Checks available disk space in `/nsm/libvirt/volumes/`
   - Ensures sufficient space for the requested volume size

2. **VM State Management:**
   - Connects to the local libvirt daemon
   - Stops the VM if it's currently running
   - Retrieves current VM configuration

3. **Volume Creation:**
   - Creates volume directory if it doesn't exist
   - Uses `qemu-img create` with full pre-allocation
   - Sets proper ownership (socore:socore) and permissions (644)
   - Validates volume creation success

4. **Volume Attachment:**
   - Modifies VM's libvirt XML configuration
   - Adds disk element with virtio-blk driver
   - Configures cache='none' and io='native' for performance
   - Attaches volume as `/dev/vdb`

5. **VM Redefinition:**
   - Applies the new configuration by redefining the VM
   - Optionally starts the VM if requested
   - Emits deployment status events for monitoring

6. **Error Handling:**
   - Validates all input parameters
   - Checks disk space before creation
   - Handles volume creation failures
   - Handles volume attachment failures
   - Provides detailed error messages for troubleshooting

**Exit Codes:**

- `0`: Success
- `1`: An error occurred during execution

**Logging:**

- Logs are written to `/opt/so/log/hypervisor/so-kvm-create-volume.log`
- Both file and console logging are enabled for real-time monitoring
- Log entries include timestamps and severity levels
- Log prefixes: VOLUME:, VM:, HARDWARE:, SPACE:
- Detailed error messages are logged for troubleshooting
"""
import argparse
import sys
import os
import libvirt
import logging
import socket
import subprocess
import pwd
import grp
import xml.etree.ElementTree as ET
from io import StringIO
from so_vm_utils import start_vm, stop_vm
from so_logging_utils import setup_logging

# Get hypervisor name from local hostname
HYPERVISOR = socket.gethostname()

# Volume storage directory
VOLUME_DIR = '/nsm/libvirt/volumes'

# Custom exception classes
class InsufficientSpaceError(Exception):
    """Raised when there is insufficient disk space for volume creation."""
    pass

class VolumeCreationError(Exception):
    """Raised when volume creation fails."""
    pass

class VolumeAttachmentError(Exception):
    """Raised when volume attachment fails."""
    pass

# Custom log handler to capture output
class StringIOHandler(logging.Handler):
    def __init__(self):
        super().__init__()
        self.strio = StringIO()

    def emit(self, record):
        msg = self.format(record)
        self.strio.write(msg + '\n')

    def get_value(self):
        return self.strio.getvalue()

def parse_arguments():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description='Create and attach a virtual volume to a KVM virtual machine for NSM storage.')
    parser.add_argument('-v', '--vm', required=True, help='Name of the virtual machine to attach the volume to.')
    parser.add_argument('-s', '--size', type=int, required=True, help='Size of the volume in GB (must be a positive integer).')
    parser.add_argument('-S', '--start', action='store_true', help='Start the VM after volume creation and attachment.')
    args = parser.parse_args()

    # Validate size is positive
    if args.size <= 0:
        parser.error("Volume size must be a positive integer.")

    return args

def check_disk_space(size_gb, logger):
    """
    Check if there is sufficient disk space available for volume creation.

    Args:
        size_gb: Size of the volume in GB
        logger: Logger instance

    Raises:
        InsufficientSpaceError: If there is not enough disk space
    """
    try:
        stat = os.statvfs(VOLUME_DIR)
        # Available space in bytes
        available_bytes = stat.f_bavail * stat.f_frsize
        # Required space in bytes (add 10% buffer)
        required_bytes = size_gb * 1024 * 1024 * 1024 * 1.1

        available_gb = available_bytes / (1024 * 1024 * 1024)
        required_gb = required_bytes / (1024 * 1024 * 1024)

        logger.info(f"SPACE: Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB")

        if available_bytes < required_bytes:
            raise InsufficientSpaceError(
                f"Insufficient disk space. Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB"
            )

        logger.info(f"SPACE: Sufficient disk space available for {size_gb} GB volume")

    except OSError as e:
        logger.error(f"SPACE: Failed to check disk space: {e}")
        raise
def create_volume_file(vm_name, size_gb, logger):
    """
    Create a pre-allocated raw disk image for the VM.

    Args:
        vm_name: Name of the VM
        size_gb: Size of the volume in GB
        logger: Logger instance

    Returns:
        Path to the created volume file

    Raises:
        VolumeCreationError: If volume creation fails
    """
    # Define volume path (directory already created in main())
    volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm.img")

    # Check if volume already exists
    if os.path.exists(volume_path):
        logger.error(f"VOLUME: Volume already exists: {volume_path}")
        raise VolumeCreationError(f"Volume already exists: {volume_path}")

    logger.info(f"VOLUME: Creating {size_gb} GB volume at {volume_path}")

    # Create volume using qemu-img with full pre-allocation
    try:
        cmd = [
            'qemu-img', 'create',
            '-f', 'raw',
            '-o', 'preallocation=full',
            volume_path,
            f"{size_gb}G"
        ]

        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True
        )

        logger.info("VOLUME: Volume created successfully")
        if result.stdout:
            logger.debug(f"VOLUME: qemu-img output: {result.stdout.strip()}")

    except subprocess.CalledProcessError as e:
        logger.error(f"VOLUME: Failed to create volume: {e}")
        if e.stderr:
            logger.error(f"VOLUME: qemu-img error: {e.stderr.strip()}")
        raise VolumeCreationError(f"Failed to create volume: {e}")

    # Set ownership to socore:socore
    try:
        socore_uid = pwd.getpwnam('socore').pw_uid
        socore_gid = grp.getgrnam('socore').gr_gid
        os.chown(volume_path, socore_uid, socore_gid)
        logger.info("VOLUME: Set ownership to socore:socore")
    except (KeyError, OSError) as e:
        logger.error(f"VOLUME: Failed to set ownership: {e}")
        raise VolumeCreationError(f"Failed to set ownership: {e}")

    # Set permissions to 644
    try:
        os.chmod(volume_path, 0o644)
        logger.info("VOLUME: Set permissions to 644")
    except OSError as e:
        logger.error(f"VOLUME: Failed to set permissions: {e}")
        raise VolumeCreationError(f"Failed to set permissions: {e}")

    # Verify volume was created
    if not os.path.exists(volume_path):
        logger.error(f"VOLUME: Volume file not found after creation: {volume_path}")
        raise VolumeCreationError(f"Volume file not found after creation: {volume_path}")

    volume_size = os.path.getsize(volume_path)
    logger.info(f"VOLUME: Volume created: {volume_path} ({volume_size} bytes)")

    return volume_path
def attach_volume_to_vm(conn, vm_name, volume_path, logger):
    """
    Attach the volume to the VM's libvirt XML configuration.

    Args:
        conn: Libvirt connection
        vm_name: Name of the VM
        volume_path: Path to the volume file
        logger: Logger instance

    Raises:
        VolumeAttachmentError: If volume attachment fails
    """
    try:
        # Get the VM domain
        dom = conn.lookupByName(vm_name)

        # Get the XML description of the VM
        xml_desc = dom.XMLDesc()
        root = ET.fromstring(xml_desc)

        # Find the devices element
        devices_elem = root.find('./devices')
        if devices_elem is None:
            logger.error("VM: Could not find <devices> element in XML")
            raise VolumeAttachmentError("Could not find <devices> element in VM XML")

        # Log ALL devices with PCI addresses to find conflicts
        logger.info("DISK_DEBUG: Examining ALL devices with PCI addresses")
        for device in devices_elem:
            address = device.find('./address')
            if address is not None and address.get('type') == 'pci':
                bus = address.get('bus', 'unknown')
                slot = address.get('slot', 'unknown')
                function = address.get('function', 'unknown')
                logger.info(f"DISK_DEBUG: Device {device.tag}: bus={bus}, slot={slot}, function={function}")

        # Log existing disk configuration for debugging
        logger.info("DISK_DEBUG: Examining existing disk configuration")
        existing_disks = devices_elem.findall('./disk')
        for idx, disk in enumerate(existing_disks):
            target = disk.find('./target')
            source = disk.find('./source')
            address = disk.find('./address')

            dev_name = target.get('dev') if target is not None else 'unknown'
            source_file = source.get('file') if source is not None else 'unknown'

            if address is not None:
                slot = address.get('slot', 'unknown')
                bus = address.get('bus', 'unknown')
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
            else:
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")

        # Check if vdb already exists
        for disk in devices_elem.findall('./disk'):
            target = disk.find('./target')
            if target is not None and target.get('dev') == 'vdb':
                logger.error("VM: Device vdb already exists in VM configuration")
                raise VolumeAttachmentError("Device vdb already exists in VM configuration")

        logger.info(f"VM: Attaching volume to {vm_name} as /dev/vdb")

        # Create disk element
        disk_elem = ET.SubElement(devices_elem, 'disk', attrib={
            'type': 'file',
            'device': 'disk'
        })

        # Add driver element
        ET.SubElement(disk_elem, 'driver', attrib={
            'name': 'qemu',
            'type': 'raw',
            'cache': 'none',
            'io': 'native'
        })

        # Add source element
        ET.SubElement(disk_elem, 'source', attrib={
            'file': volume_path
        })

        # Add target element
        ET.SubElement(disk_elem, 'target', attrib={
            'dev': 'vdb',
            'bus': 'virtio'
        })
        # Add address element
        # Use bus 0x07 with slot 0x00 to ensure NSM volume appears after OS disk (which is on bus 0x04)
        # Bus 0x05 is used by memballoon, bus 0x06 is used by rng device
        # Libvirt requires slot 0x00 for devices on non-zero (pcie-root-port) buses
        # This ensures vda = OS disk, vdb = NSM volume
        ET.SubElement(disk_elem, 'address', attrib={
            'type': 'pci',
            'domain': '0x0000',
            'bus': '0x07',
            'slot': '0x00',
            'function': '0x0'
        })

logger.info(f"HARDWARE: Added disk configuration for vdb")
|
||||
|
||||
# Log disk ordering after adding new disk
|
||||
logger.info("DISK_DEBUG: Disk configuration after adding NSM volume")
|
||||
all_disks = devices_elem.findall('./disk')
|
||||
for idx, disk in enumerate(all_disks):
|
||||
target = disk.find('./target')
|
||||
source = disk.find('./source')
|
||||
address = disk.find('./address')
|
||||
|
||||
dev_name = target.get('dev') if target is not None else 'unknown'
|
||||
source_file = source.get('file') if source is not None else 'unknown'
|
||||
|
||||
if address is not None:
|
||||
slot = address.get('slot', 'unknown')
|
||||
bus = address.get('bus', 'unknown')
|
||||
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
|
||||
else:
|
||||
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")
|
||||
|
||||
# Convert XML back to string
|
||||
new_xml_desc = ET.tostring(root, encoding='unicode')
|
||||
|
||||
# Redefine the VM with the new XML
|
||||
conn.defineXML(new_xml_desc)
|
||||
logger.info(f"VM: VM redefined with volume attached")
|
||||
|
||||
except libvirt.libvirtError as e:
|
||||
logger.error(f"VM: Failed to attach volume: {e}")
|
||||
raise VolumeAttachmentError(f"Failed to attach volume: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"VM: Failed to attach volume: {e}")
|
||||
raise VolumeAttachmentError(f"Failed to attach volume: {e}")
|
||||
|
||||
def emit_status_event(vm_name, status):
    """
    Emit a deployment status event.

    Args:
        vm_name: Name of the VM
        status: Status message
    """
    try:
        subprocess.run([
            'so-salt-emit-vm-deployment-status-event',
            '-v', vm_name,
            '-H', HYPERVISOR,
            '-s', status
        ], check=True)
    except subprocess.CalledProcessError:
        # Don't fail the entire operation if status event fails
        pass
def main():
    """Main function to orchestrate volume creation and attachment."""
    # Set up logging using the so_logging_utils library
    string_handler = StringIOHandler()
    string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger = setup_logging(
        logger_name='so-kvm-create-volume',
        log_file_path='/opt/so/log/hypervisor/so-kvm-create-volume.log',
        log_level=logging.INFO,
        format_str='%(asctime)s - %(levelname)s - %(message)s'
    )
    logger.addHandler(string_handler)

    vm_name = None

    try:
        # Parse arguments
        args = parse_arguments()

        vm_name = args.vm
        size_gb = args.size
        start_vm_flag = args.start

        logger.info(f"VOLUME: Starting volume creation for VM '{vm_name}' with size {size_gb} GB")

        # Emit start status event
        emit_status_event(vm_name, 'Volume Creation')

        # Ensure volume directory exists before checking disk space
        try:
            os.makedirs(VOLUME_DIR, mode=0o755, exist_ok=True)
            socore_uid = pwd.getpwnam('socore').pw_uid
            socore_gid = grp.getgrnam('socore').gr_gid
            os.chown(VOLUME_DIR, socore_uid, socore_gid)
            logger.debug(f"VOLUME: Ensured volume directory exists: {VOLUME_DIR}")
        except Exception as e:
            logger.error(f"VOLUME: Failed to create volume directory: {e}")
            emit_status_event(vm_name, 'Volume Configuration Failed')
            sys.exit(1)

        # Check disk space
        check_disk_space(size_gb, logger)

        # Connect to libvirt
        try:
            conn = libvirt.open(None)
            logger.info("VM: Connected to libvirt")
        except libvirt.libvirtError as e:
            logger.error(f"VM: Failed to open connection to libvirt: {e}")
            emit_status_event(vm_name, 'Volume Configuration Failed')
            sys.exit(1)

        # Stop VM if running
        dom = stop_vm(conn, vm_name, logger)

        # Create volume file
        volume_path = create_volume_file(vm_name, size_gb, logger)

        # Attach volume to VM
        attach_volume_to_vm(conn, vm_name, volume_path, logger)

        # Start VM if -S or --start argument is provided
        if start_vm_flag:
            dom = conn.lookupByName(vm_name)
            start_vm(dom, logger)
            logger.info(f"VM: VM '{vm_name}' started successfully")
        else:
            logger.info("VM: Start flag not provided; VM will remain stopped")

        # Close connection
        conn.close()

        # Emit success status event
        emit_status_event(vm_name, 'Volume Configuration')

        logger.info(f"VOLUME: Volume creation and attachment completed successfully for VM '{vm_name}'")

    except KeyboardInterrupt:
        error_msg = "Operation cancelled by user"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except InsufficientSpaceError as e:
        error_msg = f"SPACE: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except VolumeCreationError as e:
        error_msg = f"VOLUME: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except VolumeAttachmentError as e:
        error_msg = f"VM: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except Exception as e:
        error_msg = f"An error occurred: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}
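
Once the script reports success, the volume and its attachment can be sanity-checked from the hypervisor; a sketch reusing the vm1_sensor example from the docstring:

```bash
# Verify the volume file and its attachment (names follow the docstring example).
qemu-img info /nsm/libvirt/volumes/vm1_sensor-nsm.img    # expect format: raw, full size
virsh domblklist vm1_sensor                              # should list vda (OS) and vdb
virsh dumpxml vm1_sensor | grep -A4 'vm1_sensor-nsm.img' # driver/cache/io settings
```
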
@@ -22,7 +22,7 @@ kibana:
     - default
     - file
   migrations:
-    discardCorruptObjects: "8.18.6"
+    discardCorruptObjects: "8.18.8"
   telemetry:
     enabled: False
   security:
@@ -422,6 +422,7 @@ preupgrade_changes() {
     [[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
     [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
     [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
+    [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
     true
}
@@ -602,9 +603,6 @@ post_to_2.4.170() {
 }

 post_to_2.4.180() {
-    echo "Regenerating Elastic Agent Installers"
-    /sbin/so-elastic-agent-gen-installers
-
     # Force update to Kafka output policy
     /usr/sbin/so-kafka-fleet-output-policy --force
@@ -612,11 +610,24 @@
 }

 post_to_2.4.190() {
+    echo "Regenerating Elastic Agent Installers"
+    /sbin/so-elastic-agent-gen-installers
+
     # Only need to update import / eval nodes
     if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
         update_import_fleet_output
     fi

+    # Check if expected default policy is logstash (global.pipeline is REDIS or "")
+    pipeline=$(lookup_pillar "pipeline" "global")
+    if [[ -z "$pipeline" ]] || [[ "$pipeline" == "REDIS" ]]; then
+        # Check if this grid is currently affected by corrupt fleet output policy
+        if elastic-agent status | grep "config: key file not configured" > /dev/null 2>&1; then
+            echo "Elastic Agent shows an ssl error connecting to logstash output. Updating output policy..."
+            update_default_logstash_output
+        fi
+    fi
+
    POSTVERSION=2.4.190
}
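
The detection above can be exercised by hand on a manager before running soup; a read-only sketch using the same grep the upgrade performs:

```bash
# A fleet output whose ssl key never made it into secrets leaves elastic-agent
# complaining about an unconfigured key file (assumes elastic-agent is on PATH).
if elastic-agent status 2>/dev/null | grep -q "config: key file not configured"; then
    echo "Affected: grid-logstash output needs update_default_logstash_output"
fi
```
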
@@ -876,14 +887,14 @@ up_to_2.4.170() {
 }

 up_to_2.4.180() {
-    # Elastic Update for this release, so download Elastic Agent files
-    determine_elastic_agent_upgrade
-
     echo "Nothing to do for 2.4.180"
     INSTALLEDVERSION=2.4.180
 }

 up_to_2.4.190() {
-    echo "Nothing to do for 2.4.190"
+    # Elastic Update for this release, so download Elastic Agent files
+    determine_elastic_agent_upgrade
+
     INSTALLEDVERSION=2.4.190
 }
@@ -1173,6 +1184,31 @@ update_import_fleet_output() {
     fi
 }

+update_default_logstash_output() {
+    echo "Updating fleet logstash output policy grid-logstash"
+    if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
+        # Keep already configured hosts for this update, subsequent host updates come from so-elastic-fleet-outputs-update
+        HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
+        DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
+        DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
+        LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
+        LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
+        LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
+        JSON_STRING=$(jq -n \
+            --argjson HOSTS "$HOSTS" \
+            --arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
+            --arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
+            --arg LOGSTASHKEY "$LOGSTASHKEY" \
+            --arg LOGSTASHCRT "$LOGSTASHCRT" \
+            --arg LOGSTASHCA "$LOGSTASHCA" \
+            '{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
+    fi
+
+    if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
+        echo "Successfully updated grid-logstash fleet output policy"
+    fi
+}
+
 update_salt_mine() {
     echo "Populating the mine with mine_functions for each host."
     set +e
@@ -516,23 +516,85 @@ def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pc
     target = hv_name + "_*"

     try:
-        args_list = [
-            'vm_name=' + vm_name,
-            'cpu=' + str(cpu) if cpu else '',
-            'memory=' + str(memory) if memory else '',
-            'start=' + str(start)
-        ]
+        args_list = ['vm_name=' + vm_name]
+
+        # Only add parameters that are actually specified
+        if cpu is not None:
+            args_list.append('cpu=' + str(cpu))
+        if memory is not None:
+            args_list.append('memory=' + str(memory))
+
+        # Add PCI devices if provided
+        if pci_list:
+            # Pass all PCI devices as a comma-separated list
+            args_list.append('pci=' + ','.join(pci_list))
+
+        # Always add start parameter
+        args_list.append('start=' + str(start))

         result = local.cmd(target, 'qcow2.modify_hardware_config', args_list)
         format_qcow2_output('Hardware configuration', result)
     except Exception as e:
         logger.error(f"An error occurred while running qcow2.modify_hardware_config: {e}")
+def run_qcow2_create_volume_config(profile, vm_name, size_gb, cpu=None, memory=None, start=False):
+    """Create a volume for the VM and optionally configure CPU/memory.
+
+    Args:
+        profile (str): The cloud profile name
+        vm_name (str): The name of the VM
+        size_gb (int): Size of the volume in GB
+        cpu (int, optional): Number of CPUs to assign
+        memory (int, optional): Amount of memory in MiB
+        start (bool): Whether to start the VM after configuration
+    """
+    hv_name = profile.split('_')[1]
+    target = hv_name + "_*"
+
+    try:
+        # Step 1: Create the volume
+        logger.info(f"Creating {size_gb}GB volume for VM {vm_name}")
+        volume_result = local.cmd(
+            target,
+            'qcow2.create_volume_config',
+            kwarg={
+                'vm_name': vm_name,
+                'size_gb': size_gb,
+                'start': False  # Don't start yet if we need to configure CPU/memory
+            }
+        )
+        format_qcow2_output('Volume creation', volume_result)
+
+        # Step 2: Configure CPU and memory if specified
+        if cpu or memory:
+            logger.info(f"Configuring hardware for VM {vm_name}: CPU={cpu}, Memory={memory}MiB")
+            hw_result = local.cmd(
+                target,
+                'qcow2.modify_hardware_config',
+                kwarg={
+                    'vm_name': vm_name,
+                    'cpu': cpu,
+                    'memory': memory,
+                    'start': start
+                }
+            )
+            format_qcow2_output('Hardware configuration', hw_result)
+        elif start:
+            # If no CPU/memory config needed but we need to start the VM
+            logger.info(f"Starting VM {vm_name}")
+            start_result = local.cmd(
+                target,
+                'qcow2.modify_hardware_config',
+                kwarg={
+                    'vm_name': vm_name,
+                    'start': True
+                }
+            )
+            format_qcow2_output('VM startup', start_result)
+
+    except Exception as e:
+        logger.error(f"An error occurred while creating volume and configuring hardware: {e}")
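
The helper above deliberately splits the work: the volume is created with start=False, and the VM is only started by the follow-up hardware call. The equivalent sequence by hand from the manager, with illustrative target and sizes:

```bash
# Step 1: create the volume but leave the VM stopped.
sudo salt 'hv1_*' qcow2.create_volume_config vm_name=sensor1_sensor size_gb=500 start=False
# Step 2: apply CPU/memory and start the VM in the same call.
sudo salt 'hv1_*' qcow2.modify_hardware_config vm_name=sensor1_sensor cpu=8 memory=16384 start=True
```
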
 def run_qcow2_modify_network_config(profile, vm_name, mode, ip=None, gateway=None, dns=None, search_domain=None):
     hv_name = profile.split('_')[1]
     target = hv_name + "_*"

@@ -586,6 +648,7 @@ def parse_arguments():
     network_group.add_argument('-c', '--cpu', type=int, help='Number of virtual CPUs to assign.')
     network_group.add_argument('-m', '--memory', type=int, help='Amount of memory to assign in MiB.')
     network_group.add_argument('-P', '--pci', action='append', help='PCI hardware ID(s) to passthrough to the VM (e.g., 0000:c7:00.0). Can be specified multiple times.')
+    network_group.add_argument('--nsm-size', type=int, help='Size in GB for NSM volume creation. Can be used with copper/sfp NICs (--pci). Only disk passthrough (without --nsm-size) prevents volume creation.')

     args = parser.parse_args()
@@ -621,6 +684,8 @@ def main():
         hw_config.append(f"{args.memory}MB RAM")
     if args.pci:
         hw_config.append(f"PCI devices: {', '.join(args.pci)}")
+    if args.nsm_size:
+        hw_config.append(f"NSM volume: {args.nsm_size}GB")
     hw_string = f" and hardware config: {', '.join(hw_config)}" if hw_config else ""

     logger.info(f"Received request to create VM '{args.vm_name}' using profile '{args.profile}' {network_config}{hw_string}")
@@ -643,8 +708,58 @@ def main():
         # Step 2: Provision the VM (without starting it)
         call_salt_cloud(args.profile, args.vm_name)

-        # Step 3: Modify hardware configuration
-        run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=args.pci, start=True)
+        # Step 3: Determine storage configuration approach
+        # Priority: disk passthrough > volume creation (but volume can coexist with copper/sfp NICs)
+        # Note: virtual_node_manager.py already filters out --nsm-size when disk is present,
+        # so if both --pci and --nsm-size are present here, the PCI devices are copper/sfp NICs
+        use_passthrough = False
+        use_volume_creation = False
+        has_nic_passthrough = False
+
+        if args.nsm_size:
+            # Validate nsm_size
+            if args.nsm_size <= 0:
+                logger.error(f"Invalid nsm_size value: {args.nsm_size}. Must be a positive integer.")
+                sys.exit(1)
+            use_volume_creation = True
+            logger.info(f"Using volume creation with size {args.nsm_size}GB (--nsm-size parameter specified)")
+
+            if args.pci:
+                # If both nsm_size and PCI are present, PCI devices are copper/sfp NICs
+                # (virtual_node_manager.py filters out nsm_size when disk is present)
+                has_nic_passthrough = True
+                logger.info(f"PCI devices (copper/sfp NICs) will be passed through along with volume: {', '.join(args.pci)}")
+        elif args.pci:
+            # Only PCI devices, no nsm_size - the devices may be a disk or NICs.
+            # virtual_node_manager.py never sends both nsm_size and a disk PCI slot
+            # to this script, so we can also land here when neither a disk passthrough
+            # nor nsm_size was specified but some other PCI slot was passed.
+            use_passthrough = True
+            logger.info("Configuring PCI device passthrough (--pci parameter specified without --nsm-size)")
+
+        # Step 4: Configure hardware based on storage approach
+        if use_volume_creation:
+            # Create volume first
+            run_qcow2_create_volume_config(args.profile, args.vm_name, size_gb=args.nsm_size, cpu=args.cpu, memory=args.memory, start=False)
+
+            # Then configure NICs if present
+            if has_nic_passthrough:
+                logger.info(f"Configuring NIC passthrough for VM {args.vm_name}")
+                run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=args.pci, start=True)
+            else:
+                # No NICs, just start the VM
+                logger.info(f"Starting VM {args.vm_name}")
+                run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=None, start=True)
+        elif use_passthrough:
+            # Use existing passthrough logic via modify_hardware_config
+            run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=args.pci, start=True)
+        else:
+            # No storage configuration, just configure CPU/memory if specified
+            if args.cpu or args.memory:
+                run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=None, start=True)
+            else:
+                # No hardware configuration needed, just start the VM
+                logger.info(f"No hardware configuration specified, starting VM {args.vm_name}")
+                run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=None, start=True)

     except KeyboardInterrupt:
         logger.error("so-salt-cloud: Operation cancelled by user.")
@@ -161,6 +161,7 @@ DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts'
 VALID_ROLES = ['sensor', 'searchnode', 'idh', 'receiver', 'heavynode', 'fleet']
 LICENSE_PATH = '/opt/so/saltstack/local/pillar/soc/license.sls'
 DEFAULTS_PATH = '/opt/so/saltstack/default/salt/hypervisor/defaults.yaml'
+HYPERVISOR_PILLAR_PATH = '/opt/so/saltstack/local/pillar/hypervisor/soc_hypervisor.sls'
 # Define the retention period for destroyed VMs (in hours)
 DESTROYED_VM_RETENTION_HOURS = 48
@@ -295,16 +296,48 @@ def get_hypervisor_model(hypervisor: str) -> str:
         raise

 def load_hardware_defaults(model: str) -> dict:
-    """Load hardware configuration from defaults.yaml."""
+    """Load hardware configuration from defaults.yaml and optionally override with pillar configuration."""
+    config = None
+    config_source = None
+
     try:
+        # First, try to load from defaults.yaml
+        log.debug("Checking for model %s in %s", model, DEFAULTS_PATH)
         defaults = read_yaml_file(DEFAULTS_PATH)
         if not defaults or 'hypervisor' not in defaults:
             raise ValueError("Invalid defaults.yaml structure")
         if 'model' not in defaults['hypervisor']:
             raise ValueError("No model configurations found in defaults.yaml")
-        if model not in defaults['hypervisor']['model']:
-            raise ValueError(f"Model {model} not found in defaults.yaml")
-        return defaults['hypervisor']['model'][model]
+
+        # Check if model exists in defaults
+        if model in defaults['hypervisor']['model']:
+            config = defaults['hypervisor']['model'][model]
+            config_source = DEFAULTS_PATH
+            log.debug("Found model %s in %s", model, DEFAULTS_PATH)
+
+        # Then, try to load from pillar file (if it exists)
+        try:
+            log.debug("Checking for model %s in %s", model, HYPERVISOR_PILLAR_PATH)
+            pillar_config = read_yaml_file(HYPERVISOR_PILLAR_PATH)
+            if pillar_config and 'hypervisor' in pillar_config:
+                if 'model' in pillar_config['hypervisor']:
+                    if model in pillar_config['hypervisor']['model']:
+                        # Override with pillar configuration
+                        config = pillar_config['hypervisor']['model'][model]
+                        config_source = HYPERVISOR_PILLAR_PATH
+                        log.debug("Found model %s in %s (overriding defaults)", model, HYPERVISOR_PILLAR_PATH)
+        except FileNotFoundError:
+            log.debug("Pillar file %s not found, using defaults only", HYPERVISOR_PILLAR_PATH)
+        except Exception as e:
+            log.warning("Failed to read pillar file %s: %s (using defaults)", HYPERVISOR_PILLAR_PATH, str(e))
+
+        # If model was not found in either file, raise an error
+        if config is None:
+            raise ValueError(f"Model {model} not found in {DEFAULTS_PATH} or {HYPERVISOR_PILLAR_PATH}")
+
+        log.debug("Using hardware configuration for model %s from %s", model, config_source)
+        return config

     except Exception as e:
         log.error("Failed to load hardware defaults: %s", str(e))
         raise
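
Since the pillar file now overrides defaults.yaml per model, a quick way to see which source load_hardware_defaults() would use (the model name below is purely illustrative):

```bash
# Grep both sources for a model stanza; a hit in the pillar file wins.
MODEL="SOSMN"  # hypothetical model name
grep -n "$MODEL" /opt/so/saltstack/default/salt/hypervisor/defaults.yaml
grep -n "$MODEL" /opt/so/saltstack/local/pillar/hypervisor/soc_hypervisor.sls 2>/dev/null \
    || echo "no pillar override for $MODEL"
```
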
@@ -633,6 +666,35 @@ def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
            except subprocess.CalledProcessError as e:
                logger.error(f"Failed to emit success status event: {e}")

    # Validate nsm_size if present
    if 'nsm_size' in vm_config:
        try:
            size = int(vm_config['nsm_size'])
            if size <= 0:
                log.error("VM: %s - nsm_size must be a positive integer, got: %d", vm_name, size)
                mark_invalid_hardware(hypervisor_path, vm_name, vm_config,
                                      {'nsm_size': 'Invalid nsm_size: must be positive integer'})
                return
            if size > 10000:  # 10TB reasonable maximum
                log.error("VM: %s - nsm_size %dGB exceeds reasonable maximum (10000GB)", vm_name, size)
                mark_invalid_hardware(hypervisor_path, vm_name, vm_config,
                                      {'nsm_size': f'Invalid nsm_size: {size}GB exceeds maximum (10000GB)'})
                return
            log.debug("VM: %s - nsm_size validated: %dGB", vm_name, size)
        except (ValueError, TypeError) as e:
            log.error("VM: %s - nsm_size must be a valid integer, got: %s", vm_name, vm_config.get('nsm_size'))
            mark_invalid_hardware(hypervisor_path, vm_name, vm_config,
                                  {'nsm_size': 'Invalid nsm_size: must be valid integer'})
            return

    # Check for conflicting storage configurations
    has_disk = 'disk' in vm_config and vm_config['disk']
    has_nsm_size = 'nsm_size' in vm_config and vm_config['nsm_size']

    if has_disk and has_nsm_size:
        log.warning("VM: %s - Both disk and nsm_size specified. disk takes precedence, nsm_size will be ignored.",
                    vm_name)

    # Initial hardware validation against model
    is_valid, errors = validate_hardware_request(model_config, vm_config)
    if not is_valid:
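As a quick illustration of the nsm_size checks above (the values are hypothetical):

```python
# Hypothetical vm_config values and how the validation above treats them:
vm_config = {'nsm_size': '500'}   # valid: parsed to 500, a 500GB volume is created
vm_config = {'nsm_size': 0}       # rejected: must be a positive integer
vm_config = {'nsm_size': 20000}   # rejected: exceeds the 10000GB maximum
vm_config = {'nsm_size': 'big'}   # rejected: int() raises ValueError
```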
@@ -668,6 +730,11 @@ def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
    if 'memory' in vm_config:
        memory_mib = int(vm_config['memory']) * 1024
        cmd.extend(['-m', str(memory_mib)])

    # Add nsm_size if specified and disk is not specified
    if 'nsm_size' in vm_config and vm_config['nsm_size'] and not ('disk' in vm_config and vm_config['disk']):
        cmd.extend(['--nsm-size', str(vm_config['nsm_size'])])
        log.debug("VM: %s - Adding nsm_size parameter: %s", vm_name, vm_config['nsm_size'])

    # Add PCI devices
    for hw_type in ['disk', 'copper', 'sfp']:
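A sketch of what the two branches above contribute to the argument list for a volume-backed sensor; the surrounding `cmd` construction is not shown in this hunk, so the full command line is an assumption:

```python
# Hypothetical: given vm_config = {'memory': 16, 'nsm_size': 500} and no 'disk':
cmd = []
cmd.extend(['-m', str(16 * 1024)])    # -> ['-m', '16384']
cmd.extend(['--nsm-size', str(500)])  # -> ['--nsm-size', '500']
# cmd == ['-m', '16384', '--nsm-size', '500']
```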
@@ -63,18 +63,22 @@ hypervisor:
          required: true
          readonly: true
          forcedType: int
        - field: nsm_size
          label: "Size of virtual disk to create and use for /nsm, in GB. Only applicable if no pass-through disk."
          forcedType: int
          readonly: true
        - field: disk
          label: "Disk(s) for passthrough. Free: FREE | Total: TOTAL"
          label: "Disk(s) to pass through for /nsm. Free: FREE | Total: TOTAL"
          readonly: true
          options: []
          forcedType: '[]int'
        - field: copper
          label: "Copper port(s) for passthrough. Free: FREE | Total: TOTAL"
          label: "Copper port(s) to pass through. Free: FREE | Total: TOTAL"
          readonly: true
          options: []
          forcedType: '[]int'
        - field: sfp
          label: "SFP port(s) for passthrough. Free: FREE | Total: TOTAL"
          label: "SFP port(s) to pass through. Free: FREE | Total: TOTAL"
          readonly: true
          options: []
          forcedType: '[]int'
@@ -237,10 +237,22 @@ function manage_salt() {

  case "$op" in
    state)
      log "Performing '$op' for '$state' on minion '$minion'"
      state=$(echo "$request" | jq -r .state)
      response=$(salt --async "$minion" state.apply "$state" queue=2)
      async=$(echo "$request" | jq -r .async)
      if [[ $async == "true" ]]; then
        log "Performing async '$op' on minion $minion with state '$state'"
        response=$(salt --async "$minion" state.apply "$state" queue=2)
      else
        log "Performing '$op' on minion $minion with state '$state'"
        response=$(salt "$minion" state.apply "$state")
      fi

      exit_code=$?
      if [[ $exit_code -ne 0 && "$response" =~ "is running as PID" ]]; then
        log "Salt already running: $response ($exit_code)"
        respond "$id" "ERROR_SALT_ALREADY_RUNNING"
        return
      fi
      ;;
    highstate)
      log "Performing '$op' on minion $minion"

@@ -259,7 +271,7 @@ function manage_salt() {
      ;;
  esac

  if [[ exit_code -eq 0 ]]; then
  if [[ $exit_code -eq 0 ]]; then
    log "Successful command execution: $response"
    respond "$id" "true"
  else
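For reference, a hypothetical request payload for the `state` branch above; the field names are inferred from the `jq` lookups in this hunk, and the full schema is not shown in this diff:

```python
import json

# Hypothetical request consumed by manage_salt; only the .op, .state, and
# .async fields used above are assumed here, plus id/minion for context.
request = json.dumps({
    "id": "req-42",
    "op": "state",
    "minion": "sensor1_sensor",
    "state": "storage.nsm_mount",
    "async": "true",  # the string "true" selects the salt --async branch
})
```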
@@ -4,10 +4,17 @@
# Elastic License 2.0.


{% set nvme_devices = salt['cmd.shell']("find /dev -name 'nvme*n1' 2>/dev/null") %}
{% set nvme_devices = salt['cmd.shell']("ls /dev/nvme*n1 2>/dev/null || echo ''") %}
{% set virtio_devices = salt['cmd.shell']("test -b /dev/vdb && echo '/dev/vdb' || echo ''") %}

{% if nvme_devices %}

include:
  - storage.nsm_mount
  - storage.nsm_mount_nvme

{% elif virtio_devices %}

include:
  - storage.nsm_mount_virtio

{% endif %}
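A minimal Python sketch of the render-time device detection the Jinja above performs, using glob/os in place of salt['cmd.shell']; the state names are taken from the includes, everything else is illustrative:

```python
import glob
import os
import stat

# NVMe namespaces take precedence; a virtio-blk volume appears as /dev/vdb.
nvme_devices = glob.glob('/dev/nvme*n1')
vdb = '/dev/vdb'
has_virtio = os.path.exists(vdb) and stat.S_ISBLK(os.stat(vdb).st_mode)

if nvme_devices:
    includes = ['storage.nsm_mount_nvme']
elif has_virtio:
    includes = ['storage.nsm_mount_virtio']
else:
    includes = []
```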
@@ -22,8 +22,8 @@ storage_nsm_mount_logdir:

# Install the NSM mount script
storage_nsm_mount_script:
  file.managed:
    - name: /usr/sbin/so-nsm-mount
    - source: salt://storage/tools/sbin/so-nsm-mount
    - name: /usr/sbin/so-nsm-mount-nvme
    - source: salt://storage/tools/sbin/so-nsm-mount-nvme
    - mode: 755
    - user: root
    - group: root

@@ -34,7 +34,7 @@ storage_nsm_mount_script:

# Execute the mount script if not already mounted
storage_nsm_mount_execute:
  cmd.run:
    - name: /usr/sbin/so-nsm-mount
    - name: /usr/sbin/so-nsm-mount-nvme
    - unless: mountpoint -q /nsm
    - require:
      - file: storage_nsm_mount_script
39 salt/storage/nsm_mount_virtio.sls Normal file
@@ -0,0 +1,39 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Install required packages
storage_nsm_mount_virtio_packages:
  pkg.installed:
    - pkgs:
      - xfsprogs

# Ensure log directory exists
storage_nsm_mount_virtio_logdir:
  file.directory:
    - name: /opt/so/log
    - makedirs: True
    - user: root
    - group: root
    - mode: 755

# Install the NSM mount script
storage_nsm_mount_virtio_script:
  file.managed:
    - name: /usr/sbin/so-nsm-mount-virtio
    - source: salt://storage/tools/sbin/so-nsm-mount-virtio
    - mode: 755
    - user: root
    - group: root
    - require:
      - pkg: storage_nsm_mount_virtio_packages
      - file: storage_nsm_mount_virtio_logdir

# Execute the mount script if not already mounted
storage_nsm_mount_virtio_execute:
  cmd.run:
    - name: /usr/sbin/so-nsm-mount-virtio
    - unless: mountpoint -q /nsm
    - require:
      - file: storage_nsm_mount_virtio_script
@@ -81,7 +81,7 @@

set -e

LOG_FILE="/opt/so/log/so-nsm-mount.log"
LOG_FILE="/opt/so/log/so-nsm-mount-nvme.log"
VG_NAME=""
LV_NAME="nsm"
MOUNT_POINT="/nsm"
171 salt/storage/tools/sbin/so-nsm-mount-virtio Normal file
@@ -0,0 +1,171 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Usage:
#   so-nsm-mount-virtio
#
# Options:
#   None - script automatically configures /dev/vdb
#
# Examples:
#   1. Configure and mount virtio-blk device:
#      ```bash
#      sudo so-nsm-mount-virtio
#      ```
#
# Notes:
#   - Requires root privileges
#   - Mounts /dev/vdb as /nsm
#   - Creates XFS filesystem if needed
#   - Configures persistent mount via /etc/fstab
#   - Safe to run multiple times
#
# Description:
#   This script automates the configuration and mounting of virtio-blk devices
#   as /nsm in Security Onion virtual machines. It performs these steps:
#
# Dependencies:
#   - xfsprogs: Required for XFS filesystem operations
#
#   1. Safety Checks:
#      - Verifies root privileges
#      - Checks if /nsm is already mounted
#      - Verifies /dev/vdb exists
#
#   2. Filesystem Creation:
#      - Creates XFS filesystem on /dev/vdb if not already formatted
#
#   3. Mount Configuration:
#      - Creates /nsm directory if needed
#      - Adds entry to /etc/fstab for persistence
#      - Mounts the filesystem as /nsm
#
# Exit Codes:
#   0: Success conditions:
#      - Device configured and mounted
#      - Already properly mounted
#   1: Error conditions:
#      - Must be run as root
#      - Device /dev/vdb not found
#      - Filesystem creation failed
#      - Mount operation failed
#
# Logging:
#   - All operations logged to /opt/so/log/so-nsm-mount-virtio.log

set -e

LOG_FILE="/opt/so/log/so-nsm-mount-virtio.log"
DEVICE="/dev/vdb"
MOUNT_POINT="/nsm"

# Function to log messages
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') $1" | tee -a "$LOG_FILE"
}

# Function to log errors
log_error() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') ERROR: $1" | tee -a "$LOG_FILE" >&2
}

# Function to check if running as root
check_root() {
    if [ "$EUID" -ne 0 ]; then
        log_error "Must be run as root"
        exit 1
    fi
}

# Main execution
main() {
    log "=========================================="
    log "Starting virtio-blk NSM mount process"
    log "=========================================="

    # Check root privileges
    check_root

    # Check if already mounted
    if mountpoint -q "$MOUNT_POINT"; then
        log "$MOUNT_POINT is already mounted"
        log "=========================================="
        exit 0
    fi

    # Check if device exists
    if [ ! -b "$DEVICE" ]; then
        log_error "Device $DEVICE not found"
        log "=========================================="
        exit 1
    fi

    log "Found device: $DEVICE"

    # Get device size
    local size=$(lsblk -dbn -o SIZE "$DEVICE" 2>/dev/null | numfmt --to=iec)
    log "Device size: $size"

    # Check if device has filesystem
    if ! blkid "$DEVICE" | grep -q 'TYPE="xfs"'; then
        log "Creating XFS filesystem on $DEVICE"
        if ! mkfs.xfs -f "$DEVICE" 2>&1 | tee -a "$LOG_FILE"; then
            log_error "Failed to create filesystem"
            log "=========================================="
            exit 1
        fi
        log "Filesystem created successfully"
    else
        log "Device already has XFS filesystem"
    fi

    # Create mount point
    if [ ! -d "$MOUNT_POINT" ]; then
        log "Creating mount point $MOUNT_POINT"
        mkdir -p "$MOUNT_POINT"
    fi

    # Add to fstab if not present
    if ! grep -q "$DEVICE.*$MOUNT_POINT" /etc/fstab; then
        log "Adding entry to /etc/fstab"
        echo "$DEVICE $MOUNT_POINT xfs defaults 0 0" >> /etc/fstab
        log "Entry added to /etc/fstab"
    else
        log "Entry already exists in /etc/fstab"
    fi

    # Mount the filesystem
    log "Mounting $DEVICE to $MOUNT_POINT"
    if mount "$MOUNT_POINT" 2>&1 | tee -a "$LOG_FILE"; then
        log "Successfully mounted $DEVICE to $MOUNT_POINT"

        # Verify mount
        if mountpoint -q "$MOUNT_POINT"; then
            log "Mount verified successfully"

            # Display mount information
            log "Mount details:"
            df -h "$MOUNT_POINT" | tail -n 1 | tee -a "$LOG_FILE"
        else
            log_error "Mount verification failed"
            log "=========================================="
            exit 1
        fi
    else
        log_error "Failed to mount $DEVICE"
        log "=========================================="
        exit 1
    fi

    log "=========================================="
    log "Virtio-blk NSM mount process completed successfully"
    log "=========================================="
    exit 0
}

# Run main function
main
30 salt/zeek/policy/custom/filters/dns Normal file
@@ -0,0 +1,30 @@
hook DNS::log_policy(rec: DNS::Info, id: Log::ID, filter: Log::Filter)
    {
    # Only put a single name per line otherwise there will be memory issues!
    # If the query comes back blank don't log
    if (!rec?$query)
        break;

    # If the query comes back with one of these don't log
    if (rec?$query && /google.com$/ in rec$query)
        break;

    # If the query comes back with one of these don't log
    if (rec?$query && /.apple.com$/ in rec$query)
        break;

    # Don't log reverse lookups
    if (rec?$query && /.in-addr.arpa/ in to_lower(rec$query))
        break;

    # Don't log netbios lookups. This generates a crazy amount of logs
    if (rec?$qtype_name && /NB/ in rec$qtype_name)
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(DNS::LOG);
    local filter: Log::Filter = [$name="dns-filter"];
    Log::add_filter(DNS::LOG, filter);
    }
13 salt/zeek/policy/custom/filters/files Normal file
@@ -0,0 +1,13 @@
hook Files::log_policy(rec: Files::Info, id: Log::ID, filter: Log::Filter)
    {
    # Turn off a specific mimetype
    if (rec?$mime_type && ( /soap\+xml/ | /json/ | /xml/ | /x509/ ) in rec$mime_type)
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(Files::LOG);
    local filter: Log::Filter = [$name="files-filter"];
    Log::add_filter(Files::LOG, filter);
    }
20 salt/zeek/policy/custom/filters/httphost Normal file
@@ -0,0 +1,20 @@
### HTTP filter by host entries by string #####

module Filterhttp;

export {
    global remove_host_entries: set[string] = {"www.genevalab.com", "www.google.com"};
}

hook HTTP::log_policy(rec: HTTP::Info, id: Log::ID, filter: Log::Filter)
    {
    # Remove HTTP host entries
    if ( ! rec?$host || rec$host in remove_host_entries )
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(HTTP::LOG);
    local filter: Log::Filter = [$name="http-filter"];
    Log::add_filter(HTTP::LOG, filter);
    }
14 salt/zeek/policy/custom/filters/httpuri Normal file
@@ -0,0 +1,14 @@
### HTTP filter by uri using pattern ####

hook HTTP::log_policy(rec: HTTP::Info, id: Log::ID, filter: Log::Filter)
    {
    # Remove HTTP uri entries by regex
    if ( rec?$uri && /^\/kratos\// in rec$uri )
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(HTTP::LOG);
    local filter: Log::Filter = [$name="http-filter"];
    Log::add_filter(HTTP::LOG, filter);
    }
29 salt/zeek/policy/custom/filters/ssl Normal file
@@ -0,0 +1,29 @@
### Log filter by JA3S md5 hash:
hook SSL::log_policy(rec: SSL::Info, id: Log::ID, filter: Log::Filter)
    {
    # SSL log filter Ja3s by md5
    if (rec?$ja3s_cipher && ( /623de93db17d313345d7ea481e7443cf/ ) in rec$ja3s_cipher)
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(SSL::LOG);
    local filter: Log::Filter = [$name="ssl-filter"];
    Log::add_filter(SSL::LOG, filter);
    }

### Log filter by server name:
hook SSL::log_policy(rec: SSL::Info, id: Log::ID, filter: Log::Filter)
    {
    # SSL log filter by server name
    if (rec?$server_name && ( /api.github.com$/ ) in rec$server_name)
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(SSL::LOG);
    local filter: Log::Filter = [$name="ssl-filter"];
    Log::add_filter(SSL::LOG, filter);
    }
17 salt/zeek/policy/custom/filters/tunnel Normal file
@@ -0,0 +1,17 @@
global tunnel_subnet: set[subnet] = {
    10.19.0.0/24
};

hook Tunnel::log_policy(rec: Tunnel::Info, id: Log::ID, filter: Log::Filter)
    {
    if (rec$id$orig_h in tunnel_subnet || rec$id$resp_h in tunnel_subnet)
        break;
    }

event zeek_init()
    {
    Log::remove_default_filter(Tunnel::LOG);
    local filter: Log::Filter = [$name="tunnel-filter"];
    Log::add_filter(Tunnel::LOG, filter);
    }
@@ -61,6 +61,48 @@ zeek:
        global: True
        advanced: True
        duplicates: True
      dns:
        description: DNS Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      files:
        description: Files Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      httphost:
        description: HTTP Hosts Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      httpuri:
        description: HTTP URI Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      ssl:
        description: SSL Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      tunnel:
        description: Tunnel Filter for Zeek. This is an advanced setting and will take further action to enable.
        helpLink: zeek.html
        file: True
        global: True
        advanced: True
        duplicates: True
      file_extraction:
        description: Contains a list of file or MIME types Zeek will extract from the network streams. Values must adhere to the following format - {"MIME_TYPE":"FILE_EXTENSION"}
        forcedType: "[]{}"
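As a hypothetical illustration of the documented file_extraction format, here are entries matching the {"MIME_TYPE":"FILE_EXTENSION"} shape; these specific MIME/extension pairs are examples, not shipped defaults:

```python
# Each entry follows {"MIME_TYPE": "FILE_EXTENSION"}; the pairs below are
# illustrative only.
file_extraction = [
    {"application/x-dosexec": "exe"},
    {"application/pdf": "pdf"},
]
```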