mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
Merge pull request #14784 from Security-Onion-Solutions/vlb2
hardware virtualization
This commit is contained in:
3
.github/.gitleaks.toml
vendored
3
.github/.gitleaks.toml
vendored
@@ -541,5 +541,6 @@ paths = [
|
|||||||
'''gitleaks.toml''',
|
'''gitleaks.toml''',
|
||||||
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
||||||
'''(go.mod|go.sum)$''',
|
'''(go.mod|go.sum)$''',
|
||||||
'''salt/nginx/files/enterprise-attack.json'''
|
'''salt/nginx/files/enterprise-attack.json''',
|
||||||
|
'''(.*?)whl$'''
|
||||||
]
|
]
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
# Created by https://www.gitignore.io/api/macos,windows
|
# Created by https://www.gitignore.io/api/macos,windows
|
||||||
# Edit at https://www.gitignore.io/?templates=macos,windows
|
# Edit at https://www.gitignore.io/?templates=macos,windows
|
||||||
|
|
||||||
|
|||||||
34
pillar/hypervisor/nodes.sls
Normal file
34
pillar/hypervisor/nodes.sls
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{% set node_types = {} %}
|
||||||
|
{% for minionid, ip in salt.saltutil.runner(
|
||||||
|
'mine.get',
|
||||||
|
tgt='G@role:so-hypervisor or G@role:so-managerhype',
|
||||||
|
fun='network.ip_addrs',
|
||||||
|
tgt_type='compound') | dictsort()
|
||||||
|
%}
|
||||||
|
|
||||||
|
# only add a node to the pillar if it returned an ip from the mine
|
||||||
|
{% if ip | length > 0%}
|
||||||
|
{% set hostname = minionid.split('_') | first %}
|
||||||
|
{% set node_type = minionid.split('_') | last %}
|
||||||
|
{% if node_type not in node_types.keys() %}
|
||||||
|
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
|
||||||
|
{% else %}
|
||||||
|
{% if hostname not in node_types[node_type] %}
|
||||||
|
{% do node_types[node_type].update({hostname: ip[0]}) %}
|
||||||
|
{% else %}
|
||||||
|
{% do node_types[node_type][hostname].update(ip[0]) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
|
||||||
|
hypervisor:
|
||||||
|
nodes:
|
||||||
|
{% for node_type, values in node_types.items() %}
|
||||||
|
{{node_type}}:
|
||||||
|
{% for hostname, ip in values.items() %}
|
||||||
|
{{hostname}}:
|
||||||
|
ip: {{ip}}
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
@@ -18,6 +18,7 @@ base:
|
|||||||
- telegraf.adv_telegraf
|
- telegraf.adv_telegraf
|
||||||
- versionlock.soc_versionlock
|
- versionlock.soc_versionlock
|
||||||
- versionlock.adv_versionlock
|
- versionlock.adv_versionlock
|
||||||
|
- soc.license
|
||||||
|
|
||||||
'* and not *_desktop':
|
'* and not *_desktop':
|
||||||
- firewall.soc_firewall
|
- firewall.soc_firewall
|
||||||
@@ -25,7 +26,12 @@ base:
|
|||||||
- nginx.soc_nginx
|
- nginx.soc_nginx
|
||||||
- nginx.adv_nginx
|
- nginx.adv_nginx
|
||||||
|
|
||||||
'*_manager or *_managersearch':
|
'salt-cloud:driver:libvirt':
|
||||||
|
- match: grain
|
||||||
|
- vm.soc_vm
|
||||||
|
- vm.adv_vm
|
||||||
|
|
||||||
|
'*_manager or *_managersearch or *_managerhype':
|
||||||
- match: compound
|
- match: compound
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
||||||
@@ -44,7 +50,6 @@ base:
|
|||||||
- logstash.adv_logstash
|
- logstash.adv_logstash
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- kratos.soc_kratos
|
- kratos.soc_kratos
|
||||||
@@ -70,6 +75,9 @@ base:
|
|||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
- kafka.adv_kafka
|
||||||
|
- hypervisor.nodes
|
||||||
|
- hypervisor.soc_hypervisor
|
||||||
|
- hypervisor.adv_hypervisor
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
|
|
||||||
'*_sensor':
|
'*_sensor':
|
||||||
@@ -87,7 +95,6 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
|
|
||||||
'*_eval':
|
'*_eval':
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
@@ -114,7 +121,6 @@ base:
|
|||||||
- idstools.adv_idstools
|
- idstools.adv_idstools
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- strelka.soc_strelka
|
- strelka.soc_strelka
|
||||||
@@ -174,7 +180,6 @@ base:
|
|||||||
- manager.adv_manager
|
- manager.adv_manager
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- strelka.soc_strelka
|
- strelka.soc_strelka
|
||||||
@@ -240,7 +245,6 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
- kafka.adv_kafka
|
||||||
@@ -258,8 +262,6 @@ base:
|
|||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
|
||||||
- soc.license
|
|
||||||
|
|
||||||
'*_import':
|
'*_import':
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
@@ -283,7 +285,6 @@ base:
|
|||||||
- manager.adv_manager
|
- manager.adv_manager
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- backup.soc_backup
|
- backup.soc_backup
|
||||||
@@ -319,8 +320,12 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
|
|
||||||
|
'*_hypervisor':
|
||||||
|
- minions.{{ grains.id }}
|
||||||
|
- minions.adv_{{ grains.id }}
|
||||||
|
|
||||||
'*_desktop':
|
'*_desktop':
|
||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
|
|||||||
246
salt/_modules/qcow2.py
Normal file
246
salt/_modules/qcow2.py
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
#!py
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
|
||||||
|
for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
|
||||||
|
It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.
|
||||||
|
|
||||||
|
The module offers two main capabilities:
|
||||||
|
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
|
||||||
|
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
|
||||||
|
|
||||||
|
This module is intended to work with Security Onion's virtualization infrastructure and is typically
|
||||||
|
used in conjunction with salt-cloud for VM provisioning and management.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
__virtualname__ = 'qcow2'
|
||||||
|
|
||||||
|
def __virtual__():
|
||||||
|
return __virtualname__
|
||||||
|
|
||||||
|
def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
|
||||||
|
'''
|
||||||
|
Usage:
|
||||||
|
salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
image
|
||||||
|
Path to the QCOW2 image file that will be modified
|
||||||
|
interface
|
||||||
|
Network interface name to configure (e.g., 'enp1s0')
|
||||||
|
mode
|
||||||
|
Network configuration mode, either 'dhcp4' or 'static4'
|
||||||
|
vm_name
|
||||||
|
Full name of the VM (hostname_role)
|
||||||
|
ip4
|
||||||
|
IPv4 address with CIDR notation (e.g., '192.168.1.10/24')
|
||||||
|
Required when mode='static4'
|
||||||
|
gw4
|
||||||
|
IPv4 gateway address (e.g., '192.168.1.1')
|
||||||
|
Required when mode='static4'
|
||||||
|
dns4
|
||||||
|
Comma-separated list of IPv4 DNS servers (e.g., '8.8.8.8,8.8.4.4')
|
||||||
|
Optional for both DHCP and static configurations
|
||||||
|
search4
|
||||||
|
DNS search domain for IPv4 (e.g., 'example.local')
|
||||||
|
Optional for both DHCP and static configurations
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. **Configure DHCP:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='dhcp4'
|
||||||
|
```
|
||||||
|
This configures enp1s0 to use DHCP for IP assignment
|
||||||
|
|
||||||
|
2. **Configure Static IP:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='static4' ip4='192.168.1.10/24' gw4='192.168.1.1' dns4='192.168.1.1,8.8.8.8' search4='example.local'
|
||||||
|
```
|
||||||
|
This sets a static IP configuration with DNS servers and search domain
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- The QCOW2 image must be accessible and writable by the salt minion
|
||||||
|
- The image should not be in use by a running VM when modified
|
||||||
|
- Network changes take effect on next VM boot
|
||||||
|
- Requires so-qcow2-modify-network script to be installed
|
||||||
|
|
||||||
|
Description:
|
||||||
|
This function modifies network configuration within a QCOW2 image file by executing
|
||||||
|
the so-qcow2-modify-network script. It supports both DHCP and static IPv4 configuration.
|
||||||
|
The script mounts the image, modifies the network configuration files, and unmounts
|
||||||
|
safely. All operations are logged for troubleshooting purposes.
|
||||||
|
|
||||||
|
Exit Codes:
|
||||||
|
0: Success
|
||||||
|
1: Invalid parameters or configuration
|
||||||
|
2: Image access or mounting error
|
||||||
|
3: Network configuration error
|
||||||
|
4: System command error
|
||||||
|
255: Unexpected error
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
- All operations are logged to the salt minion log
|
||||||
|
- Log entries are prefixed with 'qcow2 module:'
|
||||||
|
- Error conditions include detailed error messages and stack traces
|
||||||
|
- Success/failure status is logged for verification
|
||||||
|
'''
|
||||||
|
|
||||||
|
cmd = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]
|
||||||
|
|
||||||
|
if mode.lower() == 'dhcp4':
|
||||||
|
cmd.append('--dhcp4')
|
||||||
|
elif mode.lower() == 'static4':
|
||||||
|
cmd.append('--static4')
|
||||||
|
if not ip4 or not gw4:
|
||||||
|
raise ValueError('Both ip4 and gw4 are required for static configuration.')
|
||||||
|
cmd.extend(['--ip4', ip4, '--gw4', gw4])
|
||||||
|
if dns4:
|
||||||
|
cmd.extend(['--dns4', dns4])
|
||||||
|
if search4:
|
||||||
|
cmd.extend(['--search4', search4])
|
||||||
|
else:
|
||||||
|
raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))
|
||||||
|
|
||||||
|
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
||||||
|
ret = {
|
||||||
|
'retcode': result.returncode,
|
||||||
|
'stdout': result.stdout,
|
||||||
|
'stderr': result.stderr
|
||||||
|
}
|
||||||
|
if result.returncode != 0:
|
||||||
|
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
|
||||||
|
else:
|
||||||
|
log.info('qcow2 module: Script executed successfully.')
|
||||||
|
return ret
|
||||||
|
except Exception as e:
|
||||||
|
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
|
||||||
|
raise
|
||||||
|
|
||||||
|
def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
|
||||||
|
'''
|
||||||
|
Usage:
|
||||||
|
salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [pci=<id>] [start=<bool>]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
vm_name
|
||||||
|
Name of the virtual machine to modify
|
||||||
|
cpu
|
||||||
|
Number of virtual CPUs to assign (positive integer)
|
||||||
|
Optional - VM's current CPU count retained if not specified
|
||||||
|
memory
|
||||||
|
Amount of memory to assign in MiB (positive integer)
|
||||||
|
Optional - VM's current memory size retained if not specified
|
||||||
|
pci
|
||||||
|
PCI hardware ID(s) to passthrough to the VM (e.g., '0000:c7:00.0')
|
||||||
|
Can be specified multiple times for multiple devices
|
||||||
|
Optional - no PCI passthrough if not specified
|
||||||
|
start
|
||||||
|
Boolean flag to start the VM after modification
|
||||||
|
Optional - defaults to False
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. **Modify CPU and Memory:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=4 memory=8192
|
||||||
|
```
|
||||||
|
This assigns 4 CPUs and 8GB memory to the VM
|
||||||
|
|
||||||
|
2. **Enable PCI Passthrough:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_hardware_config vm_name='sensor1' pci='0000:c7:00.0' pci='0000:c4:00.0' start=True
|
||||||
|
```
|
||||||
|
This configures PCI passthrough and starts the VM
|
||||||
|
|
||||||
|
3. **Complete Hardware Configuration:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=8 memory=16384 pci='0000:c7:00.0' start=True
|
||||||
|
```
|
||||||
|
This sets CPU, memory, PCI passthrough, and starts the VM
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- VM must be stopped before modification unless only the start flag is set
|
||||||
|
- Memory is specified in MiB (1024 = 1GB)
|
||||||
|
- PCI devices must be available and not in use by the host
|
||||||
|
- CPU count should align with host capabilities
|
||||||
|
- Requires so-kvm-modify-hardware script to be installed
|
||||||
|
|
||||||
|
Description:
|
||||||
|
This function modifies the hardware configuration of a KVM virtual machine using
|
||||||
|
the so-kvm-modify-hardware script. It can adjust CPU count, memory allocation,
|
||||||
|
and PCI device passthrough. Changes are applied to the VM's libvirt configuration.
|
||||||
|
The VM can optionally be started after modifications are complete.
|
||||||
|
|
||||||
|
Exit Codes:
|
||||||
|
0: Success
|
||||||
|
1: Invalid parameters
|
||||||
|
2: VM state error (running when should be stopped)
|
||||||
|
3: Hardware configuration error
|
||||||
|
4: System command error
|
||||||
|
255: Unexpected error
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
- All operations are logged to the salt minion log
|
||||||
|
- Log entries are prefixed with 'qcow2 module:'
|
||||||
|
- Hardware configuration changes are logged
|
||||||
|
- Errors include detailed messages and stack traces
|
||||||
|
- Final status of modification is logged
|
||||||
|
'''
|
||||||
|
|
||||||
|
cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]
|
||||||
|
|
||||||
|
if cpu is not None:
|
||||||
|
if isinstance(cpu, int) and cpu > 0:
|
||||||
|
cmd.extend(['-c', str(cpu)])
|
||||||
|
else:
|
||||||
|
raise ValueError('cpu must be a positive integer.')
|
||||||
|
if memory is not None:
|
||||||
|
if isinstance(memory, int) and memory > 0:
|
||||||
|
cmd.extend(['-m', str(memory)])
|
||||||
|
else:
|
||||||
|
raise ValueError('memory must be a positive integer.')
|
||||||
|
if pci:
|
||||||
|
# Handle PCI IDs (can be a single device or comma-separated list)
|
||||||
|
if isinstance(pci, str):
|
||||||
|
devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
|
||||||
|
elif isinstance(pci, list):
|
||||||
|
devices = pci
|
||||||
|
else:
|
||||||
|
devices = [pci]
|
||||||
|
|
||||||
|
# Add each device with its own -p flag
|
||||||
|
for device in devices:
|
||||||
|
cmd.extend(['-p', str(device)])
|
||||||
|
if start:
|
||||||
|
cmd.append('-s')
|
||||||
|
|
||||||
|
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
||||||
|
ret = {
|
||||||
|
'retcode': result.returncode,
|
||||||
|
'stdout': result.stdout,
|
||||||
|
'stderr': result.stderr
|
||||||
|
}
|
||||||
|
if result.returncode != 0:
|
||||||
|
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
|
||||||
|
else:
|
||||||
|
log.info('qcow2 module: Script executed successfully.')
|
||||||
|
return ret
|
||||||
|
except Exception as e:
|
||||||
|
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
|
||||||
|
raise
|
||||||
1092
salt/_runners/setup_hypervisor.py
Normal file
1092
salt/_runners/setup_hypervisor.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,264 +1,178 @@
|
|||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
# Elastic License 2.0.
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
|
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
|
||||||
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
|
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
|
||||||
{% set saltversion = saltversion.salt.minion.version %}
|
{% set saltversion = saltversion.salt.minion.version %}
|
||||||
|
|
||||||
{# this is the list we are returning from this map file, it gets built below #}
|
{# Define common state groups to reduce redundancy #}
|
||||||
|
{% set base_states = [
|
||||||
|
'common',
|
||||||
|
'patch.os.schedule',
|
||||||
|
'motd',
|
||||||
|
'salt.minion-check',
|
||||||
|
'sensoroni',
|
||||||
|
'salt.lasthighstate',
|
||||||
|
'salt.minion'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set ssl_states = [
|
||||||
|
'ssl',
|
||||||
|
'telegraf',
|
||||||
|
'firewall',
|
||||||
|
'schedule',
|
||||||
|
'docker_clean'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set manager_states = [
|
||||||
|
'salt.master',
|
||||||
|
'ca',
|
||||||
|
'registry',
|
||||||
|
'manager',
|
||||||
|
'nginx',
|
||||||
|
'influxdb',
|
||||||
|
'soc',
|
||||||
|
'kratos',
|
||||||
|
'hydra',
|
||||||
|
'elasticfleet',
|
||||||
|
'elastic-fleet-package-registry',
|
||||||
|
'idstools',
|
||||||
|
'suricata.manager',
|
||||||
|
'utility'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set sensor_states = [
|
||||||
|
'pcap',
|
||||||
|
'suricata',
|
||||||
|
'healthcheck',
|
||||||
|
'tcpreplay',
|
||||||
|
'zeek',
|
||||||
|
'strelka'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set kafka_states = [
|
||||||
|
'kafka'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set stig_states = [
|
||||||
|
'stig'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set elastic_stack_states = [
|
||||||
|
'elasticsearch',
|
||||||
|
'elasticsearch.auth',
|
||||||
|
'kibana',
|
||||||
|
'kibana.secrets',
|
||||||
|
'elastalert',
|
||||||
|
'logstash',
|
||||||
|
'redis'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{# Initialize the allowed_states list #}
|
||||||
{% set allowed_states = [] %}
|
{% set allowed_states = [] %}
|
||||||
|
|
||||||
{% if grains.saltversion | string == saltversion | string %}
|
{% if grains.saltversion | string == saltversion | string %}
|
||||||
|
{# Map role-specific states #}
|
||||||
|
{% set role_states = {
|
||||||
|
'so-eval': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
sensor_states +
|
||||||
|
elastic_stack_states | reject('equalto', 'logstash') | list
|
||||||
|
),
|
||||||
|
'so-heavynode': (
|
||||||
|
ssl_states +
|
||||||
|
sensor_states +
|
||||||
|
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
|
||||||
|
),
|
||||||
|
'so-idh': (
|
||||||
|
ssl_states +
|
||||||
|
['idh']
|
||||||
|
),
|
||||||
|
'so-import': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
|
||||||
|
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
|
||||||
|
),
|
||||||
|
'so-manager': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-managerhype': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-managersearch': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['strelka.manager'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-searchnode': (
|
||||||
|
ssl_states +
|
||||||
|
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
|
||||||
|
stig_states
|
||||||
|
),
|
||||||
|
'so-standalone': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
sensor_states +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-sensor': (
|
||||||
|
ssl_states +
|
||||||
|
sensor_states +
|
||||||
|
['nginx'] +
|
||||||
|
stig_states
|
||||||
|
),
|
||||||
|
'so-fleet': (
|
||||||
|
ssl_states +
|
||||||
|
['logstash', 'nginx', 'healthcheck', 'elasticfleet']
|
||||||
|
),
|
||||||
|
'so-receiver': (
|
||||||
|
ssl_states +
|
||||||
|
kafka_states +
|
||||||
|
stig_states +
|
||||||
|
['logstash', 'redis']
|
||||||
|
),
|
||||||
|
'so-hypervisor': (
|
||||||
|
ssl_states +
|
||||||
|
stig_states +
|
||||||
|
['hypervisor', 'libvirt']
|
||||||
|
),
|
||||||
|
'so-desktop': (
|
||||||
|
['ssl', 'docker_clean', 'telegraf'] +
|
||||||
|
stig_states
|
||||||
|
)
|
||||||
|
} %}
|
||||||
|
|
||||||
{% set allowed_states= salt['grains.filter_by']({
|
{# Get states for the current role #}
|
||||||
'so-eval': [
|
{% if grains.role in role_states %}
|
||||||
'salt.master',
|
{% set allowed_states = role_states[grains.role] %}
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'healthcheck',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-heavynode': [
|
|
||||||
'ssl',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'elasticagent',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-idh': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'idh',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-import': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'influxdb',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'pcap',
|
|
||||||
'utility',
|
|
||||||
'suricata',
|
|
||||||
'zeek',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry'
|
|
||||||
],
|
|
||||||
'so-manager': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-managersearch': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'elasticfleet',
|
|
||||||
'firewall',
|
|
||||||
'manager',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-searchnode': [
|
|
||||||
'ssl',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka.ca',
|
|
||||||
'kafka.ssl'
|
|
||||||
],
|
|
||||||
'so-standalone': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'elasticfleet',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-sensor': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'nginx',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
'so-fleet': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'logstash',
|
|
||||||
'nginx',
|
|
||||||
'healthcheck',
|
|
||||||
'schedule',
|
|
||||||
'elasticfleet',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-receiver': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'kafka',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
'so-desktop': [
|
|
||||||
'ssl',
|
|
||||||
'docker_clean',
|
|
||||||
'telegraf',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
}, grain='role') %}
|
|
||||||
|
|
||||||
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
|
||||||
{% do allowed_states.append('zeek') %}
|
|
||||||
{%- endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
|
||||||
{% do allowed_states.append('strelka') %}
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
|
{# Add base states that apply to all roles #}
|
||||||
{% do allowed_states.append('elasticsearch') %}
|
{% for state in base_states %}
|
||||||
|
{% do allowed_states.append(state) %}
|
||||||
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
{# Add airgap state if needed #}
|
||||||
{% do allowed_states.append('elasticsearch.auth') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
|
||||||
{% do allowed_states.append('kibana') %}
|
|
||||||
{% do allowed_states.append('kibana.secrets') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
|
|
||||||
{% do allowed_states.append('elastalert') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
|
|
||||||
{% do allowed_states.append('logstash') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
|
|
||||||
{% do allowed_states.append('redis') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{# all nodes on the right salt version can run the following states #}
|
|
||||||
{% do allowed_states.append('common') %}
|
|
||||||
{% do allowed_states.append('patch.os.schedule') %}
|
|
||||||
{% do allowed_states.append('motd') %}
|
|
||||||
{% do allowed_states.append('salt.minion-check') %}
|
|
||||||
{% do allowed_states.append('sensoroni') %}
|
|
||||||
{% do allowed_states.append('salt.lasthighstate') %}
|
|
||||||
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
|
|
||||||
{% if ISAIRGAP %}
|
{% if ISAIRGAP %}
|
||||||
{% do allowed_states.append('airgap') %}
|
{% do allowed_states.append('airgap') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{# all nodes can always run salt.minion state #}
|
|
||||||
{% do allowed_states.append('salt.minion') %}
|
|
||||||
|
|||||||
@@ -106,7 +106,7 @@ Etc/UTC:
|
|||||||
timezone.system
|
timezone.system
|
||||||
|
|
||||||
# Sync curl configuration for Elasticsearch authentication
|
# Sync curl configuration for Elasticsearch authentication
|
||||||
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
|
||||||
elastic_curl_config:
|
elastic_curl_config:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /opt/so/conf/elasticsearch/curl.config
|
- name: /opt/so/conf/elasticsearch/curl.config
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
|
||||||
|
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
|
||||||
{% if GLOBALS.os_family == 'Debian' %}
|
{% if grains.os_family == 'Debian' %}
|
||||||
commonpkgs:
|
commonpkgs:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- skip_suggestions: True
|
- skip_suggestions: True
|
||||||
@@ -46,7 +46,7 @@ python-rich:
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if GLOBALS.os_family == 'RedHat' %}
|
{% if grains.os_family == 'RedHat' %}
|
||||||
|
|
||||||
remove_mariadb:
|
remove_mariadb:
|
||||||
pkg.removed:
|
pkg.removed:
|
||||||
|
|||||||
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
|
||||||
|
"""
|
||||||
|
Sets up logging for a script.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
logger_name (str): The name of the logger.
|
||||||
|
log_file_path (str): The file path for the log file.
|
||||||
|
log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
|
||||||
|
format_str (str): The format string for log messages.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
logging.Logger: Configured logger object.
|
||||||
|
"""
|
||||||
|
logger = logging.getLogger(logger_name)
|
||||||
|
logger.setLevel(log_level)
|
||||||
|
|
||||||
|
# Create directory for log file if it doesn't exist
|
||||||
|
log_file_dir = os.path.dirname(log_file_path)
|
||||||
|
if log_file_dir and not os.path.exists(log_file_dir):
|
||||||
|
try:
|
||||||
|
os.makedirs(log_file_dir)
|
||||||
|
except OSError as e:
|
||||||
|
print(f"Error creating directory {log_file_dir}: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Create handlers
|
||||||
|
c_handler = logging.StreamHandler()
|
||||||
|
f_handler = logging.FileHandler(log_file_path)
|
||||||
|
c_handler.setLevel(log_level)
|
||||||
|
f_handler.setLevel(log_level)
|
||||||
|
|
||||||
|
# Create formatter and add it to handlers
|
||||||
|
formatter = logging.Formatter(format_str)
|
||||||
|
c_handler.setFormatter(formatter)
|
||||||
|
f_handler.setFormatter(formatter)
|
||||||
|
|
||||||
|
# Add handlers to the logger if they are not already added
|
||||||
|
if not logger.hasHandlers():
|
||||||
|
logger.addHandler(c_handler)
|
||||||
|
logger.addHandler(f_handler)
|
||||||
|
|
||||||
|
return logger
|
||||||
@@ -0,0 +1,132 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for emitting VM deployment status events to the Salt event bus.
|
||||||
|
|
||||||
|
This script provides functionality to emit status events for VM deployment operations,
|
||||||
|
used by various Security Onion VM management tools.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>
|
||||||
|
|
||||||
|
Arguments:
|
||||||
|
-v, --vm-name Name of the VM (hostname_role)
|
||||||
|
-H, --hypervisor Name of the hypervisor
|
||||||
|
-s, --status Current deployment status of the VM
|
||||||
|
|
||||||
|
Example:
|
||||||
|
so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import salt.client
|
||||||
|
from typing import Dict, Any
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format='%(asctime)s - %(levelname)s - %(message)s'
|
||||||
|
)
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
|
||||||
|
"""
|
||||||
|
Emit a VM deployment status event to the salt event bus.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
vm_name: Name of the VM (hostname_role)
|
||||||
|
hypervisor: Name of the hypervisor
|
||||||
|
status: Current deployment status of the VM
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if event was sent successfully, False otherwise
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If status is not a valid deployment status
|
||||||
|
"""
|
||||||
|
log.info("Attempting to emit deployment event...")
|
||||||
|
|
||||||
|
try:
|
||||||
|
caller = salt.client.Caller()
|
||||||
|
event_data = {
|
||||||
|
'vm_name': vm_name,
|
||||||
|
'hypervisor': hypervisor,
|
||||||
|
'status': status
|
||||||
|
}
|
||||||
|
|
||||||
|
# Use consistent event tag structure
|
||||||
|
event_tag = f'soc/dyanno/hypervisor/{status.lower()}'
|
||||||
|
|
||||||
|
ret = caller.cmd(
|
||||||
|
'event.send',
|
||||||
|
event_tag,
|
||||||
|
event_data
|
||||||
|
)
|
||||||
|
|
||||||
|
if not ret:
|
||||||
|
log.error("Failed to emit VM deployment status event: %s", event_data)
|
||||||
|
return False
|
||||||
|
|
||||||
|
log.info("Successfully emitted VM deployment status event: %s", event_data)
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.error("Error emitting VM deployment status event: %s", str(e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
"""Parse command line arguments."""
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='Emit VM deployment status events to the Salt event bus.'
|
||||||
|
)
|
||||||
|
parser.add_argument('-v', '--vm-name', required=True,
|
||||||
|
help='Name of the VM (hostname_role)')
|
||||||
|
parser.add_argument('-H', '--hypervisor', required=True,
|
||||||
|
help='Name of the hypervisor')
|
||||||
|
parser.add_argument('-s', '--status', required=True,
|
||||||
|
help='Current deployment status of the VM')
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
def main():
|
||||||
|
"""Main entry point for the script."""
|
||||||
|
try:
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
success = emit_event(
|
||||||
|
vm_name=args.vm_name,
|
||||||
|
hypervisor=args.hypervisor,
|
||||||
|
status=args.status
|
||||||
|
)
|
||||||
|
|
||||||
|
if not success:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.error("Failed to emit status event: %s", str(e))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif -%}
|
||||||
@@ -166,7 +166,7 @@ eaoptionalintegrationsdir:
|
|||||||
|
|
||||||
{% for minion in node_data %}
|
{% for minion in node_data %}
|
||||||
{% set role = node_data[minion]["role"] %}
|
{% set role = node_data[minion]["role"] %}
|
||||||
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
|
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
|
||||||
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
|
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
|
||||||
{% set integration_keys = optional_integrations.keys() %}
|
{% set integration_keys = optional_integrations.keys() %}
|
||||||
fleet_server_integrations_{{ minion }}:
|
fleet_server_integrations_{{ minion }}:
|
||||||
|
|||||||
@@ -28,7 +28,7 @@
|
|||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
|
|
||||||
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
|
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
|
||||||
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
||||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
||||||
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
||||||
|
|||||||
@@ -4501,6 +4501,14 @@ elasticsearch:
|
|||||||
- data
|
- data
|
||||||
- remote_cluster_client
|
- remote_cluster_client
|
||||||
- transform
|
- transform
|
||||||
|
so-managerhype:
|
||||||
|
config:
|
||||||
|
node:
|
||||||
|
roles:
|
||||||
|
- master
|
||||||
|
- data
|
||||||
|
- remote_cluster_client
|
||||||
|
- transform
|
||||||
so-managersearch:
|
so-managersearch:
|
||||||
config:
|
config:
|
||||||
node:
|
node:
|
||||||
|
|||||||
@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
|
|||||||
- docker_container: so-elasticsearch
|
- docker_container: so-elasticsearch
|
||||||
- file: elasticsearch_sbin_jinja
|
- file: elasticsearch_sbin_jinja
|
||||||
|
|
||||||
{% if grains.role in ['so-managersearch', 'so-manager'] %}
|
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
|
||||||
{% set ap = "absent" %}
|
{% set ap = "absent" %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
|
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||||
|
|||||||
@@ -21,7 +21,7 @@
|
|||||||
'so-strelka-filestream'
|
'so-strelka-filestream'
|
||||||
] %}
|
] %}
|
||||||
|
|
||||||
{% elif GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
|
{% elif GLOBALS.role in ['so-manager', 'so-standalone','so-managersearch', 'so-managerhype'] %}
|
||||||
{% set NODE_CONTAINERS = [
|
{% set NODE_CONTAINERS = [
|
||||||
'so-dockerregistry',
|
'so-dockerregistry',
|
||||||
'so-elasticsearch',
|
'so-elasticsearch',
|
||||||
|
|||||||
@@ -14,11 +14,13 @@ firewall:
|
|||||||
external_kafka: []
|
external_kafka: []
|
||||||
fleet: []
|
fleet: []
|
||||||
heavynode: []
|
heavynode: []
|
||||||
|
hypervisor: []
|
||||||
idh: []
|
idh: []
|
||||||
import: []
|
import: []
|
||||||
localhost:
|
localhost:
|
||||||
- 127.0.0.1
|
- 127.0.0.1
|
||||||
manager: []
|
manager: []
|
||||||
|
managerhype: []
|
||||||
managersearch: []
|
managersearch: []
|
||||||
receiver: []
|
receiver: []
|
||||||
searchnode: []
|
searchnode: []
|
||||||
@@ -489,6 +491,15 @@ firewall:
|
|||||||
- elastic_agent_control
|
- elastic_agent_control
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
customhostgroup0:
|
customhostgroup0:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup1:
|
customhostgroup1:
|
||||||
@@ -541,6 +552,218 @@ firewall:
|
|||||||
desktop:
|
desktop:
|
||||||
portgroups:
|
portgroups:
|
||||||
- salt_manager
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
self:
|
||||||
|
portgroups:
|
||||||
|
- syslog
|
||||||
|
syslog:
|
||||||
|
portgroups:
|
||||||
|
- syslog
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
managerhype:
|
||||||
|
chain:
|
||||||
|
DOCKER-USER:
|
||||||
|
hostgroups:
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- kibana
|
||||||
|
- redis
|
||||||
|
- influxdb
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- docker_registry
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- localrules
|
||||||
|
- sensoroni
|
||||||
|
fleet:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_rest
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- beats_5056
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
idh:
|
||||||
|
portgroups:
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
sensor:
|
||||||
|
portgroups:
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
searchnode:
|
||||||
|
portgroups:
|
||||||
|
- redis
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- beats_5644
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
heavynode:
|
||||||
|
portgroups:
|
||||||
|
- redis
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- beats_5644
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
receiver:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
analyst:
|
||||||
|
portgroups:
|
||||||
|
- nginx
|
||||||
|
beats_endpoint:
|
||||||
|
portgroups:
|
||||||
|
- beats_5044
|
||||||
|
beats_endpoint_ssl:
|
||||||
|
portgroups:
|
||||||
|
- beats_5644
|
||||||
|
elasticsearch_rest:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_rest
|
||||||
|
elastic_agent_endpoint:
|
||||||
|
portgroups:
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
endgame:
|
||||||
|
portgroups:
|
||||||
|
- endgame
|
||||||
|
external_suricata:
|
||||||
|
portgroups:
|
||||||
|
- external_suricata
|
||||||
|
desktop:
|
||||||
|
portgroups:
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
INPUT:
|
||||||
|
hostgroups:
|
||||||
|
anywhere:
|
||||||
|
portgroups:
|
||||||
|
- ssh
|
||||||
|
dockernet:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
fleet:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
idh:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
localhost:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
sensor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
searchnode:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
heavynode:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
receiver:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
desktop:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- syslog
|
- syslog
|
||||||
@@ -1472,3 +1695,64 @@ firewall:
|
|||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup9:
|
customhostgroup9:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
|
hypervisor:
|
||||||
|
chain:
|
||||||
|
DOCKER-USER:
|
||||||
|
hostgroups:
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
INPUT:
|
||||||
|
hostgroups:
|
||||||
|
anywhere:
|
||||||
|
portgroups:
|
||||||
|
- ssh
|
||||||
|
dockernet:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
localhost:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
manager:
|
||||||
|
portgroups: []
|
||||||
|
managersearch:
|
||||||
|
portgroups: []
|
||||||
|
standalone:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
|||||||
@@ -91,6 +91,10 @@ COMMIT
|
|||||||
-A INPUT -m conntrack --ctstate INVALID -j DROP
|
-A INPUT -m conntrack --ctstate INVALID -j DROP
|
||||||
-A INPUT -p icmp -j ACCEPT
|
-A INPUT -p icmp -j ACCEPT
|
||||||
-A INPUT -j LOGGING
|
-A INPUT -j LOGGING
|
||||||
|
{% if GLOBALS.role in ['so-hypervisor', 'so-managerhyper'] -%}
|
||||||
|
-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||||
|
-A FORWARD -i br0 -o br0 -j ACCEPT
|
||||||
|
{%- endif %}
|
||||||
-A FORWARD -j DOCKER-USER
|
-A FORWARD -j DOCKER-USER
|
||||||
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
||||||
-A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
-A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||||
|
|||||||
@@ -36,6 +36,7 @@ firewall:
|
|||||||
external_kafka: *hostgroupsettings
|
external_kafka: *hostgroupsettings
|
||||||
fleet: *hostgroupsettings
|
fleet: *hostgroupsettings
|
||||||
heavynode: *hostgroupsettings
|
heavynode: *hostgroupsettings
|
||||||
|
hypervisor: *hostgroupsettings
|
||||||
idh: *hostgroupsettings
|
idh: *hostgroupsettings
|
||||||
import: *hostgroupsettings
|
import: *hostgroupsettings
|
||||||
localhost: *ROhostgroupsettingsadv
|
localhost: *ROhostgroupsettingsadv
|
||||||
|
|||||||
58
salt/hypervisor/defaults.yaml
Normal file
58
salt/hypervisor/defaults.yaml
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
hypervisor:
|
||||||
|
model:
|
||||||
|
testModel:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 128
|
||||||
|
disk:
|
||||||
|
1: pci_0000_c7_00_0
|
||||||
|
2: pci_0000_c8_00_0
|
||||||
|
copper:
|
||||||
|
1: pci_0000_c4_00_0
|
||||||
|
2: pci_0000_c4_00_1
|
||||||
|
3: pci_0000_c4_00_2
|
||||||
|
4: pci_0000_c4_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_02_00_0
|
||||||
|
6: pci_0000_02_00_1
|
||||||
|
7: pci_0000_41_00_0
|
||||||
|
8: pci_0000_41_00_1
|
||||||
|
model1:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 128
|
||||||
|
disk:
|
||||||
|
1: pci_0000_c7_00_0
|
||||||
|
2: pci_0000_c8_00_0
|
||||||
|
copper:
|
||||||
|
1: pci_0000_c4_00_0
|
||||||
|
2: pci_0000_c4_00_1
|
||||||
|
3: pci_0000_c4_00_2
|
||||||
|
4: pci_0000_c4_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_02_00_0
|
||||||
|
6: pci_0000_02_00_1
|
||||||
|
7: pci_0000_41_00_0
|
||||||
|
8: pci_0000_41_00_1
|
||||||
|
model2:
|
||||||
|
cpu: 256
|
||||||
|
memory: 256
|
||||||
|
disk:
|
||||||
|
1: pci_0000_c7_00_0
|
||||||
|
2: pci_0000_c8_00_0
|
||||||
|
3: pci_0000_c9_00_0
|
||||||
|
4: pci_0000_c10_00_0
|
||||||
|
copper:
|
||||||
|
1: pci_0000_c4_00_0
|
||||||
|
2: pci_0000_c4_00_1
|
||||||
|
3: pci_0000_c4_00_2
|
||||||
|
4: pci_0000_c4_00_3
|
||||||
|
5: pci_0000_c5_00_0
|
||||||
|
6: pci_0000_c5_00_1
|
||||||
|
7: pci_0000_c5_00_2
|
||||||
|
8: pci_0000_c5_00_3
|
||||||
|
sfp:
|
||||||
|
9: pci_0000_02_00_0
|
||||||
|
10: pci_0000_02_00_1
|
||||||
|
11: pci_0000_41_00_0
|
||||||
|
12: pci_0000_41_00_1
|
||||||
1
salt/hypervisor/hosts/README
Normal file
1
salt/hypervisor/hosts/README
Normal file
@@ -0,0 +1 @@
|
|||||||
|
This directory will contain hypervisor hosts. We need this README in place to ensure /opt/so/saltstack/local/salt/hypervisor/hosts directory gets created during setup.
|
||||||
49
salt/hypervisor/init.sls
Normal file
49
salt/hypervisor/init.sls
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
hypervisor_log_dir:
|
||||||
|
file.directory:
|
||||||
|
- name: /opt/so/log/hypervisor
|
||||||
|
|
||||||
|
hypervisor_sbin:
|
||||||
|
file.recurse:
|
||||||
|
- name: /usr/sbin
|
||||||
|
- source: salt://hypervisor/tools/sbin
|
||||||
|
- file_mode: 744
|
||||||
|
|
||||||
|
hypervisor_sbin_jinja:
|
||||||
|
file.recurse:
|
||||||
|
- name: /usr/sbin
|
||||||
|
- source: salt://hypervisor/tools/sbin_jinja
|
||||||
|
- template: jinja
|
||||||
|
- file_mode: 744
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
164
salt/hypervisor/map.jinja
Normal file
164
salt/hypervisor/map.jinja
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0.
|
||||||
|
|
||||||
|
Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
|
||||||
|
"You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
in the software, and you may not remove or obscure any functionality in the
|
||||||
|
software that is protected by the license key." #}
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
{# Import defaults.yaml for model hardware capabilities #}
|
||||||
|
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
|
||||||
|
|
||||||
|
{# Get hypervisor nodes from pillar #}
|
||||||
|
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
|
||||||
|
|
||||||
|
{# Build enhanced HYPERVISORS structure #}
|
||||||
|
{% set HYPERVISORS = {} %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: NODES content: ' ~ NODES | tojson) %}
|
||||||
|
{% for role, hypervisors in NODES.items() %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing role: ' ~ role) %}
|
||||||
|
{% do HYPERVISORS.update({role: {}}) %}
|
||||||
|
{% for hypervisor, config in hypervisors.items() %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing hypervisor: ' ~ hypervisor ~ ' with config: ' ~ config | tojson) %}
|
||||||
|
{# Get model from cached grains using Salt runner #}
|
||||||
|
{% set grains = salt.saltutil.runner('cache.grains', tgt=hypervisor ~ '_*', tgt_type='glob') %}
|
||||||
|
{% set model = '' %}
|
||||||
|
{% if grains %}
|
||||||
|
{% set minion_id = grains.keys() | first %}
|
||||||
|
{% set model = grains[minion_id].get('sosmodel', '') %}
|
||||||
|
{% endif %}
|
||||||
|
{% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
|
||||||
|
|
||||||
|
{# Get VM list from VMs file #}
|
||||||
|
{% set vms = {} %}
|
||||||
|
{% set vm_list = [] %}
|
||||||
|
{% set vm_list_file = 'hypervisor/hosts/' ~ hypervisor ~ 'VMs' %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list file: ' ~ vm_list_file) %}
|
||||||
|
{% if salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_list_file) %}
|
||||||
|
{% import_json vm_list_file as vm_list %}
|
||||||
|
{% endif %}
|
||||||
|
{% if vm_list %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list content: ' ~ vm_list | tojson) %}
|
||||||
|
{% else %}
|
||||||
|
{# we won't get here if the vm_list_file doesn't exist because we will get TemplateNotFound on the import_json #}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list empty: ' ~ vm_list_file) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# Load status and configuration for each VM #}
|
||||||
|
{% for vm in vm_list %}
|
||||||
|
{# Get VM details from list entry #}
|
||||||
|
{% set hostname = vm.get('hostname', '') %}
|
||||||
|
{% set role = vm.get('role', '') %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}
|
||||||
|
|
||||||
|
{# Load VM configuration from config file #}
|
||||||
|
{% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}
|
||||||
|
{% import_json vm_file as vm_state %}
|
||||||
|
{% if vm_state %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
|
||||||
|
{% set vm_data = {'config': vm_state.config} %}
|
||||||
|
|
||||||
|
{# Load VM status from status file #}
|
||||||
|
{% set status_file = vm_file ~ '.status' %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM status file: ' ~ status_file) %}
|
||||||
|
{% import_json status_file as status_data %}
|
||||||
|
{% if status_data %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: VM status content: ' ~ status_data | tojson) %}
|
||||||
|
{% do vm_data.update({'status': status_data}) %}
|
||||||
|
{% else %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Status file empty: ' ~ status_file) %}
|
||||||
|
{% do vm_data.update({
|
||||||
|
'status': {
|
||||||
|
'status': '',
|
||||||
|
'details': null,
|
||||||
|
'timestamp': ''
|
||||||
|
}
|
||||||
|
}) %}
|
||||||
|
{% endif %}
|
||||||
|
{% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
|
||||||
|
{% else %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Config file empty: ' ~ vm_file) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{# Find and add destroyed VMs from status files #}
|
||||||
|
{% set processed_vms = [] %}
|
||||||
|
{% for vm_full_name, vm_data in vms.items() %}
|
||||||
|
{% do processed_vms.append(vm_full_name) %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{# Find all status files for this hypervisor #}
|
||||||
|
{% set relative_path = 'hypervisor/hosts/' ~ hypervisor %}
|
||||||
|
{% set absolute_path = '/opt/so/saltstack/local/salt/' ~ relative_path %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Scanning for status files in: ' ~ absolute_path) %}
|
||||||
|
|
||||||
|
{# Try to find status files using file.find with absolute path #}
|
||||||
|
{% set status_files = salt['file.find'](absolute_path, name='*_*.status', type='f') %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Found status files: ' ~ status_files | tojson) %}
|
||||||
|
|
||||||
|
{# Convert absolute paths back to relative paths for processing #}
|
||||||
|
{% set relative_status_files = [] %}
|
||||||
|
{% for status_file in status_files %}
|
||||||
|
{% set relative_file = status_file | replace('/opt/so/saltstack/local/salt/', '') %}
|
||||||
|
{% do relative_status_files.append(relative_file) %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set status_files = relative_status_files %}
|
||||||
|
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Converted to relative paths: ' ~ status_files | tojson) %}
|
||||||
|
|
||||||
|
{% for status_file in status_files %}
|
||||||
|
{# Extract the VM name from the filename #}
|
||||||
|
{% set basename = status_file.split('/')[-1] %}
|
||||||
|
{% set vm_name = basename.replace('.status', '') %}
|
||||||
|
{% set hostname = vm_name.split('_')[0] %}
|
||||||
|
|
||||||
|
{# Skip already processed VMs #}
|
||||||
|
{% if vm_name in processed_vms %}
|
||||||
|
{% continue %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# Read the status file #}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing potential destroyed VM status file: ' ~ status_file) %}
|
||||||
|
{% import_json status_file as status_data %}
|
||||||
|
|
||||||
|
{# Only process files with "Destroyed Instance" status #}
|
||||||
|
{% if status_data and status_data.status == 'Destroyed Instance' %}
|
||||||
|
{% do salt.log.debug('salt/hypervisor/map.jinja: Found VM with Destroyed Instance status: ' ~ vm_name) %}
|
||||||
|
|
||||||
|
{# Add to vms with minimal config #}
|
||||||
|
{% do vms.update({
|
||||||
|
vm_name: {
|
||||||
|
'status': status_data,
|
||||||
|
'config': {}
|
||||||
|
}
|
||||||
|
}) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{# Merge node config with model capabilities and VM states #}
|
||||||
|
{% do HYPERVISORS[role].update({
|
||||||
|
hypervisor: {
|
||||||
|
'config': config,
|
||||||
|
'model': model,
|
||||||
|
'hardware': model_config.get('hardware', {}),
|
||||||
|
'vms': vms
|
||||||
|
}
|
||||||
|
}) %}
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{% do salt.log.error(
|
||||||
|
'Hypervisor nodes are a feature supported only for customers with a valid license.'
|
||||||
|
'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com'
|
||||||
|
'for more information about purchasing a license to enable this feature.'
|
||||||
|
) %}
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
221
salt/hypervisor/tools/sbin/so-nvme-raid1.sh
Normal file
221
salt/hypervisor/tools/sbin/so-nvme-raid1.sh
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
# RAID-1 Setup Script for NVMe Drives
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# DESCRIPTION:
|
||||||
|
# This script automatically sets up a RAID-1 (mirrored) array using two NVMe drives
|
||||||
|
# (/dev/nvme0n1 and /dev/nvme1n1) and mounts it at /nsm with XFS filesystem.
|
||||||
|
#
|
||||||
|
# FUNCTIONALITY:
|
||||||
|
# - Detects and reports existing RAID configurations
|
||||||
|
# - Thoroughly cleans target drives of any existing data/configurations
|
||||||
|
# - Creates GPT partition tables with RAID-type partitions
|
||||||
|
# - Establishes RAID-1 array (/dev/md0) for data redundancy
|
||||||
|
# - Formats the array with XFS filesystem for performance
|
||||||
|
# - Automatically mounts at /nsm and configures for boot persistence
|
||||||
|
# - Provides monitoring information for resync operations
|
||||||
|
#
|
||||||
|
# SAFETY FEATURES:
|
||||||
|
# - Requires root privileges
|
||||||
|
# - Exits gracefully if RAID already exists and is mounted
|
||||||
|
# - Performs comprehensive cleanup to avoid conflicts
|
||||||
|
# - Forces partition table updates and waits for system recognition
|
||||||
|
#
|
||||||
|
# PREREQUISITES:
|
||||||
|
# - Two NVMe drives: /dev/nvme0n1 and /dev/nvme1n1
|
||||||
|
# - Root access
|
||||||
|
# - mdadm, sgdisk, and standard Linux utilities
|
||||||
|
#
|
||||||
|
# WARNING: This script will DESTROY all data on the target drives!
|
||||||
|
#
|
||||||
|
# USAGE: sudo ./raid_setup.sh
|
||||||
|
#
|
||||||
|
#################################################################
|
||||||
|
|
||||||
|
# Exit on any error
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Function to log messages
|
||||||
|
# Emit a single timestamped log line to stdout.
# $1 - message text
log() {
    local ts
    ts="$(date '+%Y-%m-%d %H:%M:%S')"
    printf '[%s] %s\n' "$ts" "$1"
}
|
||||||
|
|
||||||
|
# Function to check if running as root
|
||||||
|
# Abort the script unless it is running with root privileges.
check_root() {
    [ "$EUID" -eq 0 ] && return 0
    log "Error: Please run as root"
    exit 1
}
|
||||||
|
|
||||||
|
# Function to check if RAID is already set up
|
||||||
|
# Inspect the system for a pre-existing RAID setup before doing anything
# destructive. Three outcomes:
#   - /dev/md0 exists AND is mounted at /nsm: report status and exit 0
#     (nothing to do).
#   - a target NVMe device is mounted at /nsm or carries an md superblock:
#     report the conflict and exit 1 (manual cleanup required).
#   - otherwise: return normally so setup can proceed.
check_existing_raid() {
    if [ -e "/dev/md0" ]; then
        if mdadm --detail /dev/md0 &>/dev/null; then
            # Third whitespace-separated field of the "State" line, e.g. "clean"
            local raid_state=$(mdadm --detail /dev/md0 | grep "State" | awk '{print $3}')
            local mount_point="/nsm"

            log "Found existing RAID array /dev/md0 (State: $raid_state)"

            if mountpoint -q "$mount_point"; then
                log "RAID is already mounted at $mount_point"
                log "Current RAID details:"
                mdadm --detail /dev/md0

                # Check if resyncing
                if grep -q "resync" /proc/mdstat; then
                    log "RAID is currently resyncing:"
                    grep resync /proc/mdstat
                    log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                else
                    log "RAID is fully synced and operational"
                fi

                # Show disk usage
                log "Current disk usage:"
                df -h "$mount_point"

                # Array already set up and mounted: success, nothing left to do.
                exit 0
            fi
        fi
    fi

    # Check if any of the target devices are in use
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        # Refuse to touch a device that already backs /nsm.
        if lsblk -o NAME,MOUNTPOINT "$device" | grep -q "nsm"; then
            log "Error: $device is already mounted at /nsm"
            exit 1
        fi

        # An md superblock on the whole disk or its first partition means the
        # device was part of an array; require explicit manual cleanup.
        if mdadm --examine "$device" &>/dev/null || mdadm --examine "${device}p1" &>/dev/null; then
            log "Error: $device appears to be part of an existing RAID array"
            log "To reuse this device, you must first:"
            log "1. Unmount any filesystems"
            log "2. Stop the RAID array: mdadm --stop /dev/md0"
            log "3. Zero the superblock: mdadm --zero-superblock ${device}p1"
            exit 1
        fi
    done
}
|
||||||
|
|
||||||
|
# Function to ensure devices are not in use
|
||||||
|
# Aggressively release one block device so it can be repartitioned:
# kill users, unmount partitions, stop md arrays referencing it, clear
# RAID/LVM/filesystem signatures, and wipe both ends of the disk where
# GPT stores its primary and backup tables. Every step is best-effort
# ("|| true") so the function never aborts the script under `set -e`.
# $1 - block device path (e.g. /dev/nvme0n1). DESTROYS data on it.
ensure_devices_free() {
    local device=$1

    log "Cleaning up device $device"

    # Kill any processes using the device
    fuser -k "${device}"* 2>/dev/null || true

    # Force unmount any partitions
    for part in "${device}"*; do
        if mount | grep -q "$part"; then
            umount -f "$part" 2>/dev/null || true
        fi
    done

    # Stop any MD arrays using this device
    for md in $(ls /dev/md* 2>/dev/null || true); do
        if mdadm --detail "$md" 2>/dev/null | grep -q "$device"; then
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Clear MD superblock
    mdadm --zero-superblock "${device}"* 2>/dev/null || true

    # Remove LVM PV if exists
    pvremove -ff -y "$device" 2>/dev/null || true

    # Clear all signatures
    wipefs -af "$device" 2>/dev/null || true

    # Delete partition table: zero the first 1 MiB (MBR + primary GPT)
    # and the last 1 MiB (backup GPT at end of disk)
    dd if=/dev/zero of="$device" bs=512 count=2048 2>/dev/null || true
    dd if=/dev/zero of="$device" bs=512 seek=$(( $(blockdev --getsz "$device") - 2048 )) count=2048 2>/dev/null || true

    # Force kernel to reread
    blockdev --rereadpt "$device" 2>/dev/null || true
    partprobe -s "$device" 2>/dev/null || true
    # Give udev a moment to settle before the caller repartitions
    sleep 2
}
|
||||||
|
|
||||||
|
# Main script
|
||||||
|
# Orchestrate the full RAID-1 build: safety checks, device cleanup,
# partitioning, array creation, XFS formatting, mounting at /nsm, and
# persistence via /etc/fstab and /etc/mdadm.conf.
# WARNING: destroys all data on /dev/nvme0n1 and /dev/nvme1n1.
main() {
    log "Starting RAID setup script"

    # Check if running as root
    check_root

    # Check for existing RAID setup (exits early if already set up/in use)
    check_existing_raid

    # Clean up any existing MD arrays
    log "Cleaning up existing MD arrays"
    mdadm --stop --scan 2>/dev/null || true

    # Clear mdadm configuration
    log "Clearing mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    # Clean and prepare devices
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        ensure_devices_free "$device"

        log "Creating new partition table on $device"
        # -Z zaps any remaining GPT/MBR data, -o writes a fresh GPT
        sgdisk -Z "$device"
        sgdisk -o "$device"

        log "Creating RAID partition"
        # One partition spanning the whole disk, type fd00 (Linux RAID)
        sgdisk -n 1:0:0 -t 1:fd00 "$device"

        # Make the kernel/udev pick up the new partition before continuing
        partprobe "$device"
        udevadm settle
        sleep 5
    done

    log "Final verification of partition availability"
    if ! [ -b "/dev/nvme0n1p1" ] || ! [ -b "/dev/nvme1n1p1" ]; then
        log "Error: Partitions not available after creation"
        exit 1
    fi

    log "Creating RAID array"
    mdadm --create /dev/md0 --level=1 --raid-devices=2 \
        --metadata=1.2 \
        /dev/nvme0n1p1 /dev/nvme1n1p1 \
        --force --run

    log "Creating XFS filesystem"
    mkfs.xfs -f /dev/md0

    log "Creating mount point"
    mkdir -p /nsm

    log "Updating fstab"
    # Drop any stale /dev/md0 entries before appending the new one;
    # nofail keeps boot from hanging if the array is unavailable
    sed -i '/\/dev\/md0/d' /etc/fstab
    echo "/dev/md0 /nsm xfs defaults,nofail 0 0" >> /etc/fstab

    log "Reloading systemd daemon"
    systemctl daemon-reload

    log "Mounting filesystem"
    mount -a

    log "Saving RAID configuration"
    # NOTE(review): this overwrites the "DEVICE partitions" line written
    # above with the scan output — confirm that is the intended final state
    mdadm --detail --scan > /etc/mdadm.conf

    log "RAID setup complete"
    log "RAID array details:"
    mdadm --detail /dev/md0

    if grep -q "resync" /proc/mdstat; then
        log "RAID is currently resyncing. You can monitor progress with:"
        log "watch -n 60 cat /proc/mdstat"
    fi
}
|
||||||
|
|
||||||
|
# Run main function
|
||||||
|
main "$@"
|
||||||
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
@@ -0,0 +1,424 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for configuring network interface predictability in Security Onion VMs.
|
||||||
|
This script modifies the necessary files to ensure consistent network interface naming.
|
||||||
|
|
||||||
|
The script performs the following operations:
|
||||||
|
1. Modifies the BLS entry to set net.ifnames=1
|
||||||
|
2. Removes any existing persistent network rules
|
||||||
|
3. Updates GRUB configuration
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
so-qcow2-network-predictable -n <domain_name> [-I <qcow2_image_path>]
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
-n, --name Domain name of the VM to configure
|
||||||
|
-I, --image (Optional) Path to the QCOW2 image. If not provided,
|
||||||
|
defaults to /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
1. **Configure using domain name:**
|
||||||
|
```bash
|
||||||
|
so-qcow2-network-predictable -n sool9
|
||||||
|
```
|
||||||
|
This command will:
|
||||||
|
- Use default image path: /nsm/libvirt/images/sool9/sool9.qcow2
|
||||||
|
- Configure network interface predictability
|
||||||
|
|
||||||
|
2. **Configure using custom image path:**
|
||||||
|
```bash
|
||||||
|
so-qcow2-network-predictable -n sool9 -I /path/to/custom/image.qcow2
|
||||||
|
```
|
||||||
|
This command will:
|
||||||
|
- Use the specified image path
|
||||||
|
- Configure network interface predictability
|
||||||
|
|
||||||
|
**Notes:**
|
||||||
|
- The VM must not be running when executing this script
|
||||||
|
- Requires root privileges
|
||||||
|
- Will automatically find and modify the appropriate BLS entry
|
||||||
|
- Removes /etc/udev/rules.d/70-persistent-net.rules if it exists
|
||||||
|
- Updates GRUB configuration after changes
|
||||||
|
|
||||||
|
**Exit Codes:**
|
||||||
|
- 0: Success
|
||||||
|
- 1: General error (invalid arguments, file operations, etc.)
|
||||||
|
- 2: VM is running
|
||||||
|
- 3: Required files not found
|
||||||
|
- 4: Permission denied
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
- Logs are written to /opt/so/log/hypervisor/so-qcow2-network-predictable.log
|
||||||
|
- Both file and console logging are enabled
|
||||||
|
- Log entries include:
|
||||||
|
- Timestamps
|
||||||
|
- Operation details
|
||||||
|
- Error messages
|
||||||
|
- Configuration changes
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import guestfs
|
||||||
|
import glob
|
||||||
|
import libvirt
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from so_logging_utils import setup_logging
|
||||||
|
|
||||||
|
# Set up logging
|
||||||
|
logger = setup_logging(
|
||||||
|
logger_name='so-qcow2-network-predictable',
|
||||||
|
log_file_path='/opt/so/log/hypervisor/so-qcow2-network-predictable.log',
|
||||||
|
log_level=logging.INFO,
|
||||||
|
format_str='%(asctime)s - %(levelname)s - %(message)s'
|
||||||
|
)
|
||||||
|
|
||||||
|
def check_domain_status(domain_name):
    """
    Check if the specified domain exists and is not running.

    Args:
        domain_name (str): Name of the libvirt domain to check

    Returns:
        bool: True if domain exists and is not running

    Raises:
        RuntimeError: If the domain is running, does not exist, or the
            connection to libvirt fails
    """
    try:
        conn = libvirt.open('qemu:///system')
        try:
            dom = conn.lookupByName(domain_name)
            if dom.isActive():
                logger.error(f"Domain '{domain_name}' is running - cannot modify configuration")
                raise RuntimeError(f"Domain '{domain_name}' must not be running")
            logger.info(f"Domain '{domain_name}' exists and is not running")
            return True
        except libvirt.libvirtError as e:
            # Prefer the structured error code over matching the error
            # message text, which is fragile across libvirt versions and
            # locales. Keep the substring check as a fallback.
            if (e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN
                    or "no domain with matching name" in str(e)):
                logger.error(f"Domain '{domain_name}' not found")
                raise RuntimeError(f"Domain '{domain_name}' not found")
            raise
        finally:
            # Always release the libvirt connection, even on error paths.
            conn.close()
    except libvirt.libvirtError as e:
        logger.error(f"Failed to connect to libvirt: {e}")
        raise RuntimeError(f"Failed to connect to libvirt: {e}")
|
||||||
|
|
||||||
|
def modify_bls_entry(g):
    """
    Find and modify the BLS entry to set net.ifnames=1.

    Locates the first *.conf file under /boot/loader/entries inside the
    guest image, strips any existing net.ifnames=0/1 parameters from its
    ``options`` line, and appends ``net.ifnames=1``.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if the entry was modified, False if no changes needed

    Raises:
        RuntimeError: If the BLS directory/entry cannot be found or modified
    """
    bls_dir = "/boot/loader/entries"
    logger.info(f"Checking BLS directory: {bls_dir}")
    if g.is_dir(bls_dir):
        logger.info("BLS directory exists")
    else:
        # Directory missing: dump /boot contents for diagnostics, then fail.
        logger.info("Listing /boot contents:")
        try:
            boot_contents = g.ls("/boot")
            logger.info(f"/boot contains: {boot_contents}")
            if g.is_dir("/boot/loader"):
                logger.info("Listing /boot/loader contents:")
                loader_contents = g.ls("/boot/loader")
                logger.info(f"/boot/loader contains: {loader_contents}")
        except Exception as e:
            logger.error(f"Error listing /boot contents: {e}")
        raise RuntimeError(f"BLS directory not found: {bls_dir}")

    # Find BLS entry file
    entries = g.glob_expand(f"{bls_dir}/*.conf")
    logger.info(f"Found BLS entries: {entries}")
    if not entries:
        logger.error("No BLS entry files found")
        raise RuntimeError("No BLS entry files found")

    # Use the first entry found
    # NOTE(review): if multiple BLS entries exist only the first is
    # modified — confirm this matches the single-kernel VM images used here
    bls_file = entries[0]
    logger.info(f"Found BLS entry file: {bls_file}")

    try:
        logger.info(f"Reading BLS file contents from: {bls_file}")
        content = g.read_file(bls_file).decode('utf-8')
        logger.info("Current BLS file content:")
        logger.info("---BEGIN BLS CONTENT---")
        logger.info(content)
        logger.info("---END BLS CONTENT---")

        lines = content.splitlines()
        modified = False

        # Only the first "options " line is rewritten (break below).
        for i, line in enumerate(lines):
            if line.startswith('options '):
                logger.info(f"Found options line: {line}")

                # First remove any existing net.ifnames parameters (both =0 and =1)
                new_line = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', line)
                # Also remove any quoted versions
                new_line = re.sub(r'\s*"net\.ifnames=[01]"\s*', ' ', new_line)
                # Clean up multiple spaces
                new_line = re.sub(r'\s+', ' ', new_line).strip()

                # Now add net.ifnames=1 at the end
                new_line = f"{new_line} net.ifnames=1"

                if new_line != line:
                    lines[i] = new_line
                    modified = True
                    logger.info(f"Updated options line. New line: {new_line}")
                break

        if modified:
            # splitlines() drops the trailing newline; restore it on write.
            new_content = '\n'.join(lines) + '\n'
            logger.info("New BLS file content:")
            logger.info("---BEGIN NEW BLS CONTENT---")
            logger.info(new_content)
            logger.info("---END NEW BLS CONTENT---")
            g.write(bls_file, new_content.encode('utf-8'))
            logger.info("Successfully updated BLS entry")
            return True

        logger.info("No changes needed for BLS entry")
        return False

    except Exception as e:
        logger.error(f"Failed to modify BLS entry: {e}")
        raise RuntimeError(f"Failed to modify BLS entry: {e}")
|
||||||
|
|
||||||
|
def remove_persistent_net_rules(g):
    """
    Remove the persistent network rules file if it exists.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if file was removed, False if it didn't exist

    Raises:
        RuntimeError: If checking for or removing the file fails
    """
    rules_file = "/etc/udev/rules.d/70-persistent-net.rules"
    logger.info(f"Checking for persistent network rules file: {rules_file}")
    try:
        # Guard clause: nothing to do when the file is absent.
        if not g.is_file(rules_file):
            logger.info("No persistent network rules file found")
            return False

        logger.info("Found persistent network rules file, removing...")
        g.rm(rules_file)
        logger.info(f"Successfully removed persistent network rules file: {rules_file}")
        return True
    except Exception as e:
        logger.error(f"Failed to remove persistent network rules: {e}")
        raise RuntimeError(f"Failed to remove persistent network rules: {e}")
|
||||||
|
|
||||||
|
def update_grub_config(g):
    """
    Update GRUB configuration inside the guest so net.ifnames=1 persists.

    Rewrites the kernelopts variable in /boot/grub2/grubenv, updates all
    kernel entries via grubby, and regenerates grub.cfg.

    Args:
        g: Mounted guestfs handle

    Raises:
        RuntimeError: If any GRUB update command fails
    """
    try:
        # First, read the current grubenv to get the existing kernelopts
        logger.info("Reading current grubenv...")
        grubenv_content = g.read_file('/boot/grub2/grubenv').decode('utf-8')
        logger.info("Current grubenv content:")
        logger.info(grubenv_content)

        # Extract current kernelopts (a quoted kernelopts="..." line)
        kernelopts_match = re.search(r'^kernelopts="([^"]+)"', grubenv_content, re.MULTILINE)
        if kernelopts_match:
            current_kernelopts = kernelopts_match.group(1)
            logger.info(f"Current kernelopts: {current_kernelopts}")

            # Remove any existing net.ifnames parameters
            new_kernelopts = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', current_kernelopts)
            # Clean up multiple spaces
            new_kernelopts = re.sub(r'\s+', ' ', new_kernelopts).strip()
            # Add net.ifnames=1
            new_kernelopts = f"{new_kernelopts} net.ifnames=1"

            logger.info(f"New kernelopts: {new_kernelopts}")

            # Update grubenv with the new kernelopts
            logger.info("Setting kernelopts with net.ifnames=1...")
            output_editenv = g.command(['grub2-editenv', '-', 'set', f'kernelopts={new_kernelopts}'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)
        else:
            # If we can't find existing kernelopts, use the default
            # (hard-coded Oracle Linux-style options for SO VM images)
            logger.warning("Could not find existing kernelopts, using default")
            output_editenv = g.command(['grub2-editenv', '-', 'set', 'kernelopts=console=tty0 no_timer_check biosdevname=0 resume=/dev/mapper/vg_main-lv_swap rd.lvm.lv=vg_main/lv_root rd.lvm.lv=vg_main/lv_swap net.ifnames=1 crashkernel=1G-64G:448M,64G-:512M'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)

        logger.info("Updating grubby with net.ifnames=1...")
        # First remove any existing net.ifnames arguments
        output_grubby_remove = g.command(['grubby', '--update-kernel=ALL', '--remove-args=net.ifnames=0 net.ifnames=1'])
        logger.info("grubby remove output:")
        logger.info(output_grubby_remove)

        # Then add net.ifnames=1
        output_grubby_add = g.command(['grubby', '--update-kernel=ALL', '--args=net.ifnames=1'])
        logger.info("grubby add output:")
        logger.info(output_grubby_add)

        # Regenerate grub.cfg so the changes take effect on next boot
        logger.info("Updating GRUB configuration...")
        output_mkconfig = g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
        logger.info("GRUB update output:")
        logger.info(output_mkconfig)
        logger.info("Successfully updated GRUB configuration")
    except Exception as e:
        logger.error(f"Failed to update GRUB configuration: {e}")
        raise RuntimeError(f"Failed to update GRUB configuration: {e}")
|
||||||
|
|
||||||
|
def configure_network_predictability(domain_name, image_path=None):
    """
    Configure network interface predictability for a VM.

    Opens the VM's QCOW2 image with libguestfs, mounts its filesystems,
    sets net.ifnames=1 in the BLS entry, removes persistent udev network
    rules, and updates GRUB when anything changed. The domain must not
    be running.

    Args:
        domain_name (str): Name of the domain to configure
        image_path (str, optional): Path to the QCOW2 image; defaults to
            /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2

    Raises:
        RuntimeError: If configuration fails, the domain is running, the
            image is missing, or the image is not readable/writable
    """
    # Check domain status (raises if running or not found)
    check_domain_status(domain_name)

    # Use default image path if none provided
    if not image_path:
        image_path = f"/nsm/libvirt/images/{domain_name}/{domain_name}.qcow2"

    if not os.path.exists(image_path):
        logger.error(f"Image file not found: {image_path}")
        raise RuntimeError(f"Image file not found: {image_path}")

    if not os.access(image_path, os.R_OK | os.W_OK):
        logger.error(f"Permission denied: Cannot access image file {image_path}")
        raise RuntimeError(f"Permission denied: Cannot access image file {image_path}")

    logger.info(f"Configuring network predictability for domain: {domain_name}")
    logger.info(f"Using image: {image_path}")

    g = guestfs.GuestFS(python_return_dict=True)
    try:
        logger.info("Initializing guestfs...")
        # No network access needed inside the appliance; SELinux relabeling off
        g.set_network(False)
        g.selinux = False
        g.add_drive_opts(image_path, format="qcow2")
        g.launch()

        logger.info("Inspecting operating system...")
        roots = g.inspect_os()
        if not roots:
            raise RuntimeError("No operating system found in image")

        # Only the first detected root filesystem is configured
        root = roots[0]
        logger.info(f"Found root filesystem: {root}")
        logger.info(f"Operating system type: {g.inspect_get_type(root)}")
        logger.info(f"Operating system distro: {g.inspect_get_distro(root)}")
        logger.info(f"Operating system major version: {g.inspect_get_major_version(root)}")
        logger.info(f"Operating system minor version: {g.inspect_get_minor_version(root)}")

        logger.info("Getting mount points...")
        mountpoints = g.inspect_get_mountpoints(root)
        logger.info(f"Found mount points: {mountpoints}")
        logger.info("Converting mount points to sortable list...")
        # Convert dictionary to list of tuples
        mountpoints = list(mountpoints.items())
        logger.info(f"Converted mount points: {mountpoints}")
        logger.info("Sorting mount points by path length for proper mount order...")
        # Shorter paths first so parents ("/") mount before children ("/boot")
        mountpoints.sort(key=lambda m: len(m[0]))
        logger.info(f"Mount order will be: {[mp[0] for mp in mountpoints]}")

        for mp_path, mp_device in mountpoints:
            try:
                logger.info(f"Attempting to mount {mp_device} at {mp_path}")
                g.mount(mp_device, mp_path)
                logger.info(f"Successfully mounted {mp_device} at {mp_path}")
            except Exception as e:
                logger.warning(f"Could not mount {mp_device} at {mp_path}: {str(e)}")
                # Continue with other mounts

        # Perform configuration steps
        bls_modified = modify_bls_entry(g)
        rules_removed = remove_persistent_net_rules(g)

        # Regenerating GRUB is only needed when something actually changed
        if bls_modified or rules_removed:
            update_grub_config(g)
            logger.info("Network predictability configuration completed successfully")
        else:
            logger.info("No changes were necessary")

    except Exception as e:
        raise RuntimeError(f"Failed to configure network predictability: {e}")
    finally:
        # Always unmount and close the handle, even on failure
        try:
            logger.info("Unmounting all filesystems...")
            g.umount_all()
            logger.info("Successfully unmounted all filesystems")
        except Exception as e:
            logger.warning(f"Error unmounting filesystems: {e}")
        g.close()
|
||||||
|
|
||||||
|
def parse_arguments():
    """Parse and return this script's command line arguments."""
    parser = argparse.ArgumentParser(
        description="Configure network interface predictability for Security Onion VMs"
    )
    # Table of (flags, options) pairs keeps the argument spec in one place.
    arg_specs = (
        (("-n", "--name"),
         {"required": True, "help": "Domain name of the VM to configure"}),
        (("-I", "--image"),
         {"help": "Path to the QCOW2 image (optional)"}),
    )
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
|
||||||
|
|
||||||
|
def main():
    """Main entry point: parse arguments, run the configuration, map errors to exit codes."""
    # Ordered mapping of RuntimeError message fragments to process exit codes;
    # first match wins, anything unmatched exits 1.
    failure_codes = (
        ("must not be running", 2),
        ("not found", 3),
        ("Permission denied", 4),
    )
    try:
        args = parse_arguments()
        configure_network_predictability(args.name, args.image)
        sys.exit(0)
    except RuntimeError as e:
        logger.error(str(e))
        for fragment, code in failure_codes:
            if fragment in str(e):
                sys.exit(code)
        sys.exit(1)
    except KeyboardInterrupt:
        logger.error("Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
206
salt/hypervisor/tools/sbin/so-wait-cloud-init
Normal file
206
salt/hypervisor/tools/sbin/so-wait-cloud-init
Normal file
@@ -0,0 +1,206 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for waiting for cloud-init to complete on a Security Onion VM.
|
||||||
|
Monitors VM state to ensure proper cloud-init initialization and shutdown.
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
so-wait-cloud-init -n <domain_name>
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
-n, --name Domain name of the VM to monitor
|
||||||
|
|
||||||
|
**Exit Codes:**
|
||||||
|
- 0: Success (cloud-init completed and VM shutdown)
|
||||||
|
- 1: General error
|
||||||
|
- 2: VM never started
|
||||||
|
- 3: VM stopped too quickly
|
||||||
|
- 4: VM failed to shutdown
|
||||||
|
|
||||||
|
**Description:**
|
||||||
|
This script monitors a VM's state to ensure proper cloud-init initialization and completion:
|
||||||
|
1. Waits for VM to start running
|
||||||
|
2. Verifies VM remains running (not an immediate crash)
|
||||||
|
3. Waits for VM to shutdown (indicating cloud-init completion)
|
||||||
|
4. Verifies VM remains shutdown
|
||||||
|
|
||||||
|
The script is typically used in the libvirt.images state after creating a new VM
|
||||||
|
to ensure cloud-init completes its initialization before proceeding with further
|
||||||
|
configuration.
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
- Logs are written to /opt/so/log/hypervisor/so-wait-cloud-init.log
|
||||||
|
- Both file and console logging are enabled
|
||||||
|
- Log entries include:
|
||||||
|
- Timestamps
|
||||||
|
- State changes
|
||||||
|
- Error conditions
|
||||||
|
- Verification steps
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from so_logging_utils import setup_logging
|
||||||
|
|
||||||
|
# Set up logging
|
||||||
|
logger = setup_logging(
|
||||||
|
logger_name='so-wait-cloud-init',
|
||||||
|
log_file_path='/opt/so/log/hypervisor/so-wait-cloud-init.log',
|
||||||
|
log_level=logging.INFO,
|
||||||
|
format_str='%(asctime)s - %(levelname)s - %(message)s'
|
||||||
|
)
|
||||||
|
|
||||||
|
def check_vm_running(domain_name):
    """
    Check if VM is in running state.

    Args:
        domain_name (str): Name of the domain to check

    Returns:
        bool: True if VM is running, False otherwise (including when the
            state cannot be determined)
    """
    try:
        result = subprocess.run(['virsh', 'list', '--state-running', '--name'],
                                capture_output=True, text=True, check=True)
        # virsh prints one domain name per line; compare whole lines so a
        # name that is a substring of another domain does not match.
        return domain_name in result.stdout.splitlines()
    except (subprocess.CalledProcessError, OSError) as e:
        # OSError covers a missing/unexecutable virsh binary
        # (FileNotFoundError), which would otherwise propagate out of the
        # polling loops and crash the script instead of being reported.
        logger.error(f"Failed to check VM state: {e}")
        return False
|
||||||
|
|
||||||
|
def wait_for_vm_start(domain_name, timeout=300):
    """
    Wait for VM to start running.

    Polls the domain state once per second until it is running or the
    timeout expires.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if VM started, False if timeout occurred
    """
    logger.info(f"Waiting for VM {domain_name} to start...")
    deadline = time.time() + timeout

    while time.time() < deadline:
        if check_vm_running(domain_name):
            logger.info("VM is running")
            return True
        time.sleep(1)

    logger.error(f"Timeout waiting for VM {domain_name} to start")
    return False
|
||||||
|
|
||||||
|
def verify_vm_running(domain_name):
    """
    Verify VM remains running after initial start.

    Sleeps briefly, then re-checks that the domain is still up, catching
    VMs that crash immediately after booting.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM is still running after verification period
    """
    logger.info("Verifying VM remains running...")
    time.sleep(5)  # Wait to ensure VM is stable

    still_up = check_vm_running(domain_name)
    if still_up:
        logger.info("VM verified running")
        return True

    logger.error("VM stopped too quickly after starting")
    return False
|
||||||
|
|
||||||
|
def wait_for_vm_shutdown(domain_name, timeout=600):
    """
    Wait for VM to shutdown.

    Polls every 5 seconds until the domain stops (cloud-init done) or the
    timeout expires, logging a progress message roughly once a minute.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if VM shutdown, False if timeout occurred
    """
    logger.info("Waiting for cloud-init to complete and VM to shutdown...")
    started = time.time()
    polls = 0

    while (time.time() - started) < timeout:
        vm_up = check_vm_running(domain_name)
        if not vm_up:
            logger.info("VM has shutdown")
            return True

        # Log status every minute (after 12 checks at 5 second intervals)
        polls += 1
        if polls % 12 == 0:
            elapsed = int(time.time() - started)
            logger.info(f"Still waiting for cloud-init... ({elapsed} seconds elapsed)")

        time.sleep(5)

    logger.error(f"Timeout waiting for VM {domain_name} to shutdown")
    return False
|
||||||
|
|
||||||
|
def verify_vm_shutdown(domain_name):
    """
    Confirm the VM stays powered off shortly after it stopped.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM remains shutdown after verification period
    """
    logger.info("Verifying VM remains shutdown...")
    # Short grace period so a reboot-in-progress is not mistaken for "off".
    time.sleep(5)

    if not check_vm_running(domain_name):
        logger.info("VM verified shutdown")
        return True

    logger.error("VM is still running after shutdown check")
    return False
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: track a VM through its cloud-init start/stop cycle.

    Exit codes:
        0 - cloud-init completed (VM started, stayed up, and shut down cleanly)
        1 - unexpected error
        2 - VM never started
        3 - VM stopped too quickly after starting
        4 - VM failed to shut down, or did not stay shut down
    """
    parser = argparse.ArgumentParser(
        description="Wait for cloud-init to complete on a Security Onion VM"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to monitor")
    args = parser.parse_args()

    # Each phase maps to a distinct exit code so callers can distinguish
    # "never started" from "died early" from "never stopped".
    phases = (
        (wait_for_vm_start, 2),
        (verify_vm_running, 3),
        (wait_for_vm_shutdown, 4),
        (verify_vm_shutdown, 4),
    )

    try:
        for phase, failure_code in phases:
            if not phase(args.name):
                sys.exit(failure_code)

        logger.info("Cloud-init completed successfully")
        sys.exit(0)
    except Exception as e:
        # SystemExit is not an Exception subclass, so the sys.exit calls
        # above pass through this handler untouched.
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||||
60
salt/hypervisor/tools/sbin/so_vm_utils.py
Normal file
60
salt/hypervisor/tools/sbin/so_vm_utils.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import libvirt
|
||||||
|
import logging
|
||||||
|
|
||||||
|
def stop_vm(conn, vm_name, logger, timeout=None):
    """
    Stops the specified virtual machine if it is running.

    Parameters:
        conn (libvirt.virConnect): The libvirt connection object.
        vm_name (str): The name of the virtual machine.
        logger (logging.Logger): The logger object.
        timeout (int | None): Maximum seconds to wait for the guest to power
            off after the shutdown request. None (the default) preserves the
            original behavior of waiting indefinitely.

    Returns:
        libvirt.virDomain: The domain object of the VM.

    Raises:
        SystemExit: If the VM cannot be found, fails to stop within the
            timeout, or a libvirt error occurs.
    """
    try:
        dom = conn.lookupByName(vm_name)
        if dom.isActive():
            logger.info(f"Shutting down VM '{vm_name}'...")
            # dom.shutdown() only *requests* a graceful shutdown; the guest
            # may take a while to honor it, so poll until it powers off.
            dom.shutdown()
            deadline = None if timeout is None else time.time() + timeout
            while dom.isActive():
                # Bounded wait (when requested) instead of the original
                # unbounded busy-wait, which could hang forever on a guest
                # that ignores the ACPI shutdown request.
                if deadline is not None and time.time() > deadline:
                    logger.error(f"Timed out waiting for VM '{vm_name}' to stop")
                    sys.exit(1)
                time.sleep(1)
            logger.info(f"VM '{vm_name}' has been stopped.")
        else:
            logger.info(f"VM '{vm_name}' is already stopped.")
        return dom
    except libvirt.libvirtError as e:
        logger.error(f"Failed to stop VM '{vm_name}': {e}")
        sys.exit(1)
|
||||||
|
|
||||||
|
def start_vm(dom, logger):
    """
    Starts the specified virtual machine.

    Parameters:
        dom (libvirt.virDomain): The domain object of the VM.
        logger (logging.Logger): The logger object.

    Raises:
        SystemExit: If the VM cannot be started.
    """
    try:
        # dom.create() boots a defined-but-inactive domain.
        dom.create()
        logger.info(f"VM '{dom.name()}' started successfully.")
    except libvirt.libvirtError as err:
        logger.error(f"Failed to start VM '{dom.name()}': {err}")
        sys.exit(1)
|
||||||
365
salt/hypervisor/tools/sbin_jinja/so-kvm-modify-hardware
Normal file
365
salt/hypervisor/tools/sbin_jinja/so-kvm-modify-hardware
Normal file
@@ -0,0 +1,365 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for managing hardware configurations of KVM virtual machines. This script provides
|
||||||
|
functionality to modify CPU, memory, and PCI device settings without manual XML editing
|
||||||
|
or direct libvirt interaction.
|
||||||
|
|
||||||
|
The script offers three main configuration capabilities:
|
||||||
|
1. CPU Management: Adjust virtual CPU count
|
||||||
|
2. Memory Management: Modify memory allocation
|
||||||
|
3. PCI Passthrough: Configure PCI device passthrough for direct hardware access
|
||||||
|
|
||||||
|
This script is designed to work with Security Onion's virtualization infrastructure and is typically
|
||||||
|
used during VM provisioning and hardware reconfiguration tasks.
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
so-kvm-modify-hardware -v <vm_name> [-c <cpu_count>] [-m <memory_amount>] [-p <pci_id>] [-p <pci_id> ...] [-s]
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
-v, --vm Name of the virtual machine to modify.
|
||||||
|
-c, --cpu Number of virtual CPUs to assign.
|
||||||
|
-m, --memory Amount of memory to assign in MiB.
|
||||||
|
-p, --pci PCI hardware ID(s) to passthrough to the VM (e.g., 0000:00:1f.2). Can be specified multiple times.
|
||||||
|
Format: domain:bus:device.function
|
||||||
|
-s, --start Start the VM after modification.
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
1. **Modify CPU and Memory with Multiple PCI Devices:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-kvm-modify-hardware -v vm1_sensor -c 4 -m 8192 -p 0000:c7:00.0 -p 0000:c8:00.0 -s
|
||||||
|
```
|
||||||
|
|
||||||
|
This command modifies a VM with the following settings:
|
||||||
|
- VM Name: `vm1_sensor`
|
||||||
|
- Hardware Configuration:
|
||||||
|
- CPUs: `4`
|
||||||
|
- Memory: `8192` MiB
|
||||||
|
- PCI Device Passthrough: `0000:c7:00.0`, `0000:c8:00.0`
|
||||||
|
- The VM is started after modification due to the `-s` flag
|
||||||
|
|
||||||
|
2. **Add PCI Device Without Other Changes:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-kvm-modify-hardware -v vm2_master -p 0000:c7:00.0
|
||||||
|
```
|
||||||
|
|
||||||
|
This command adds a single PCI device passthrough to the VM:
|
||||||
|
- VM Name: `vm2_master`
|
||||||
|
- PCI Device: `0000:c7:00.0`
|
||||||
|
- Existing CPU and memory settings are preserved
|
||||||
|
|
||||||
|
3. **Update Resource Allocation:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-kvm-modify-hardware -v vm3_search -c 2 -m 4096
|
||||||
|
```
|
||||||
|
|
||||||
|
This command updates only compute resources:
|
||||||
|
- VM Name: `vm3_search`
|
||||||
|
- CPUs: `2`
|
||||||
|
- Memory: `4096` MiB
|
||||||
|
- VM remains stopped after modification
|
||||||
|
|
||||||
|
4. **Add Multiple PCI Devices:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-kvm-modify-hardware -v vm4_node -p 0000:c7:00.0 -p 0000:c4:00.0 -p 0000:c4:00.1 -s
|
||||||
|
```
|
||||||
|
|
||||||
|
This command adds multiple PCI devices and starts the VM:
|
||||||
|
- VM Name: `vm4_node`
|
||||||
|
- PCI Devices: `0000:c7:00.0`, `0000:c4:00.0`, `0000:c4:00.1`
|
||||||
|
- VM is started after modification
|
||||||
|
|
||||||
|
**Notes:**
|
||||||
|
|
||||||
|
- The script automatically stops the VM if it's running before making modifications.
|
||||||
|
- At least one modification option (-c, -m, or -p) should be provided.
|
||||||
|
- The PCI hardware IDs must be in the format `domain:bus:device.function` (e.g., `0000:c7:00.0`).
|
||||||
|
- Multiple PCI devices can be added by using the `-p` option multiple times.
|
||||||
|
- Without the `-s` flag, the VM remains stopped after modification.
|
||||||
|
- Existing hardware configurations are preserved if not explicitly modified.
|
||||||
|
|
||||||
|
**Description:**
|
||||||
|
|
||||||
|
The `so-kvm-modify-hardware` script modifies hardware parameters of KVM virtual machines using the following process:
|
||||||
|
|
||||||
|
1. **VM State Management:**
|
||||||
|
- Connects to the local libvirt daemon
|
||||||
|
- Stops the VM if it's currently running
|
||||||
|
- Retrieves current VM configuration
|
||||||
|
|
||||||
|
2. **Hardware Configuration:**
|
||||||
|
- Modifies CPU count if specified
|
||||||
|
- Updates memory allocation if specified
|
||||||
|
- Adds PCI device passthrough configurations if specified
|
||||||
|
- All changes are made through libvirt XML configuration
|
||||||
|
|
||||||
|
3. **VM Redefinition:**
|
||||||
|
- Applies the new configuration by redefining the VM
|
||||||
|
- Optionally starts the VM if requested
|
||||||
|
- Ensures clean shutdown and startup during modifications
|
||||||
|
|
||||||
|
4. **Error Handling:**
|
||||||
|
- Validates all input parameters
|
||||||
|
- Ensures proper XML structure
|
||||||
|
- Provides detailed error messages for troubleshooting
|
||||||
|
|
||||||
|
**Exit Codes:**
|
||||||
|
|
||||||
|
- `0`: Success
|
||||||
|
- `1`: An error occurred during execution
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
|
||||||
|
- Logs are written to `/opt/so/log/hypervisor/so-kvm-modify-hardware.log`
|
||||||
|
- Both file and console logging are enabled for real-time monitoring
|
||||||
|
- Log entries include timestamps and severity levels
|
||||||
|
- Detailed error messages are logged for troubleshooting
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import libvirt
|
||||||
|
import logging
|
||||||
|
import socket
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
from io import StringIO
|
||||||
|
from so_vm_utils import start_vm, stop_vm
|
||||||
|
from so_logging_utils import setup_logging
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
# Get hypervisor name from local hostname
|
||||||
|
HYPERVISOR = socket.gethostname()
|
||||||
|
|
||||||
|
# Custom log handler to capture output
|
||||||
|
class StringIOHandler(logging.Handler):
    """Logging handler that accumulates formatted records in memory."""

    def __init__(self):
        super().__init__()
        self.strio = StringIO()

    def emit(self, record):
        # Format per the attached Formatter, then buffer newline-terminated.
        self.strio.write(self.format(record) + '\n')

    def get_value(self):
        """Return everything captured so far as a single string."""
        return self.strio.getvalue()
|
||||||
|
|
||||||
|
def parse_arguments():
    """Build the CLI parser and return the parsed arguments namespace."""
    cli = argparse.ArgumentParser(description='Modify hardware parameters of a KVM virtual machine.')
    cli.add_argument('-v', '--vm', required=True, help='Name of the virtual machine to modify.')
    cli.add_argument('-c', '--cpu', type=int, help='Number of virtual CPUs to assign.')
    cli.add_argument('-m', '--memory', type=int, help='Amount of memory to assign in MiB.')
    cli.add_argument('-p', '--pci', action='append', help='PCI hardware ID(s) to passthrough to the VM (e.g., 0000:00:1f.2). Can be specified multiple times.')
    cli.add_argument('-s', '--start', action='store_true', help='Start the VM after modification.')
    return cli.parse_args()
|
||||||
|
|
||||||
|
def _pci_source_address(pci_id, logger):
    """Validate a PCI ID ('domain:bus:slot.function') and return the libvirt
    <address> element attributes for it. Exits with status 1 on a malformed ID."""
    parts = pci_id.split(':')
    slot_func = parts[2].split('.') if len(parts) == 3 else []
    if len(parts) != 3 or len(slot_func) != 2:
        logger.error(f"Invalid PCI ID format: {pci_id}. Expected format: domain:bus:slot.function")
        sys.exit(1)
    return {
        'domain': f'0x{parts[0]}',
        'bus': f'0x{parts[1]}',
        'slot': f'0x{slot_func[0]}',
        'function': f'0x{slot_func[1]}'
    }


def modify_vm(dom, cpu_count, memory_amount, pci_ids, logger):
    """
    Build an updated libvirt XML definition for a VM.

    Parameters:
        dom (libvirt.virDomain): Domain whose current XML is the starting point.
        cpu_count (int | None): New vCPU count, or None to leave unchanged.
        memory_amount (int | None): New memory in MiB, or None to leave unchanged.
        pci_ids (list[str] | None): PCI IDs ('domain:bus:slot.function') to add
            as managed passthrough <hostdev> devices.
        logger (logging.Logger): Destination for progress/error messages.

    Returns:
        str: The modified domain XML as a string.

    Raises:
        SystemExit: On malformed PCI IDs, missing XML elements, or XML errors.
    """
    try:
        # Start from the domain's current definition.
        root = ET.fromstring(dom.XMLDesc())

        # Modify CPU count
        if cpu_count is not None:
            vcpu_elem = root.find('./vcpu')
            if vcpu_elem is None:
                logger.error("Could not find <vcpu> element in XML.")
                sys.exit(1)
            vcpu_elem.text = str(cpu_count)
            logger.info(f"Set CPU count to {cpu_count}.")

        # Modify memory amount
        if memory_amount is not None:
            memory_elem = root.find('./memory')
            current_memory_elem = root.find('./currentMemory')
            if memory_elem is None or current_memory_elem is None:
                logger.error("Could not find <memory> elements in XML.")
                sys.exit(1)
            # libvirt stores memory in KiB; the CLI accepts MiB.
            memory_elem.text = str(memory_amount * 1024)
            current_memory_elem.text = str(memory_amount * 1024)
            logger.info(f"Set memory to {memory_amount} MiB.")

        # Add PCI device passthrough(s)
        if pci_ids:
            devices_elem = root.find('./devices')
            if devices_elem is None:
                logger.error("Could not find <devices> element in XML.")
                sys.exit(1)
            for pci_id in pci_ids:
                # Fix: validate the ID *before* mutating the tree. The
                # original appended the <hostdev> element first, leaving a
                # partially-built device on a malformed ID.
                address_attrs = _pci_source_address(pci_id, logger)
                hostdev_elem = ET.SubElement(devices_elem, 'hostdev', attrib={
                    'mode': 'subsystem',
                    'type': 'pci',
                    'managed': 'yes'
                })
                source_elem = ET.SubElement(hostdev_elem, 'source')
                ET.SubElement(source_elem, 'address', attrib=address_attrs)
                logger.info(f"Added PCI device passthrough for {pci_id}.")

        # Convert XML back to string
        return ET.tostring(root, encoding='unicode')
    except Exception as e:
        # sys.exit above raises SystemExit, which is not an Exception
        # subclass, so the explicit error exits pass through untouched.
        logger.error(f"Failed to modify VM XML: {e}")
        sys.exit(1)
|
||||||
|
|
||||||
|
def redefine_vm(conn, new_xml_desc, logger):
    """Persist an updated domain definition via libvirt; exits(1) on failure.

    Parameters:
        conn (libvirt.virConnect): The libvirt connection object.
        new_xml_desc (str): Complete domain XML to define.
        logger (logging.Logger): The logger object.
    """
    try:
        conn.defineXML(new_xml_desc)
    except libvirt.libvirtError as err:
        logger.error(f"Failed to redefine VM: {err}")
        sys.exit(1)
    else:
        logger.info("VM redefined with new hardware parameters.")
|
||||||
|
|
||||||
|
def _emit_deployment_status(vm_name, status, logger):
    """Fire a 'so-salt-emit-vm-deployment-status-event' for this VM/hypervisor.

    Emission failures are logged but never raised, so status reporting can
    never mask the real outcome of the hardware change.
    """
    try:
        subprocess.run([
            'so-salt-emit-vm-deployment-status-event',
            '-v', vm_name,
            '-H', HYPERVISOR,
            '-s', status
        ], check=True)
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to emit status event '{status}': {e}")


def main():
    """Entry point: parse CLI args, apply hardware changes to the VM, and
    report deployment status via salt events.

    Exit codes:
        0 - success
        1 - any failure (libvirt connection, modification, user interrupt)
    """
    # Set up logging using the so_logging_utils library, teeing records into
    # an in-memory buffer as well as the log file.
    string_handler = StringIOHandler()
    string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger = setup_logging(
        logger_name='so-kvm-modify-hardware',
        log_file_path='/opt/so/log/hypervisor/so-kvm-modify-hardware.log',
        log_level=logging.INFO,
        format_str='%(asctime)s - %(levelname)s - %(message)s'
    )
    logger.addHandler(string_handler)

    # Bug fix: pre-set so the exception handlers below can always emit a
    # status event; the original raised NameError on vm_name if argument
    # parsing itself failed.
    vm_name = 'unknown'

    try:
        args = parse_arguments()

        vm_name = args.vm
        cpu_count = args.cpu
        memory_amount = args.memory
        pci_ids = args.pci  # This will be a list or None
        start_vm_flag = args.start

        # Connect to libvirt
        try:
            conn = libvirt.open(None)
        except libvirt.libvirtError as e:
            logger.error(f"Failed to open connection to libvirt: {e}")
            _emit_deployment_status(vm_name, 'Hardware Configuration Failed', logger)
            sys.exit(1)

        # Stop VM if running
        dom = stop_vm(conn, vm_name, logger)

        # Modify VM XML
        new_xml_desc = modify_vm(dom, cpu_count, memory_amount, pci_ids, logger)

        # Redefine VM
        redefine_vm(conn, new_xml_desc, logger)

        # Start VM if -s or --start argument is provided
        if start_vm_flag:
            # Look the domain up again so we start the redefined definition.
            dom = conn.lookupByName(vm_name)
            start_vm(dom, logger)
            logger.info(f"VM '{vm_name}' started successfully.")
        else:
            logger.info("VM start flag not provided; VM will remain stopped.")

        # Close connection
        conn.close()

        # Send success status event
        _emit_deployment_status(vm_name, 'Hardware Configuration', logger)

    except KeyboardInterrupt:
        logger.error("Operation cancelled by user")
        _emit_deployment_status(vm_name, 'Hardware Configuration Failed', logger)
        sys.exit(1)
    except Exception as e:
        # Map known failure strings to friendlier log messages.
        error_msg = str(e)
        if "Failed to open connection to libvirt" in error_msg:
            error_msg = f"Failed to connect to libvirt: {error_msg}"
        elif "Failed to redefine VM" in error_msg:
            error_msg = f"Failed to apply hardware changes: {error_msg}"
        elif "Failed to modify VM XML" in error_msg:
            error_msg = f"Failed to update hardware configuration: {error_msg}"
        else:
            error_msg = f"An error occurred: {error_msg}"
        logger.error(error_msg)
        # Bug fix: the original passed '-h' (the argparse help flag) instead
        # of '-H' for the hypervisor in this path; the shared helper always
        # uses '-H'.
        _emit_deployment_status(vm_name, 'Hardware Configuration Failed', logger)
        sys.exit(1)


if __name__ == '__main__':
    main()
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif -%}
|
||||||
531
salt/hypervisor/tools/sbin_jinja/so-qcow2-modify-network
Normal file
531
salt/hypervisor/tools/sbin_jinja/so-qcow2-modify-network
Normal file
@@ -0,0 +1,531 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for modifying network configurations within QCOW2 virtual machine images. This script provides
|
||||||
|
functionality to update NetworkManager settings, supporting both DHCP and static IP configurations
|
||||||
|
without requiring the VM to be running.
|
||||||
|
|
||||||
|
The script offers two main configuration modes:
|
||||||
|
1. DHCP Configuration: Enable automatic IP address assignment
|
||||||
|
2. Static IP Configuration: Set specific IP address, gateway, DNS servers, and search domains
|
||||||
|
|
||||||
|
For both configuration modes, the script automatically sets the following NetworkManager connection properties:
|
||||||
|
- connection.autoconnect: yes (ensures interface connects automatically)
|
||||||
|
- connection.autoconnect-priority: 999 (sets connection priority)
|
||||||
|
- connection.autoconnect-retries: -1 (unlimited connection retries)
|
||||||
|
- connection.multi-connect: 0 (single connection mode)
|
||||||
|
- connection.wait-device-timeout: -1 (wait indefinitely for device)
|
||||||
|
|
||||||
|
This script is designed to work with Security Onion's virtualization infrastructure and is typically
|
||||||
|
used during VM provisioning and network reconfiguration tasks.
|
||||||
|
|
||||||
|
**Usage:**
|
||||||
|
so-qcow2-modify-network -I <qcow2_image_path> -i <interface> (--dhcp4 | --static4 --ip4 <ip_address> --gw4 <gateway>)
|
||||||
|
[--dns4 <dns_servers>] [--search4 <search_domain>]
|
||||||
|
|
||||||
|
**Options:**
|
||||||
|
-I, --image Path to the QCOW2 image.
|
||||||
|
-i, --interface Network interface to modify (e.g., enp1s0).
|
||||||
|
--dhcp4 Configure interface for DHCP (IPv4).
|
||||||
|
--static4 Configure interface for static IPv4 settings.
|
||||||
|
--ip4 IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.
|
||||||
|
--gw4 IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.
|
||||||
|
--dns4 Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).
|
||||||
|
--search4 DNS search domain for IPv4.
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
1. **Static IP Configuration with DNS and Search Domain:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --static4 \
|
||||||
|
--ip4 192.168.1.10/24 --gw4 192.168.1.1 --dns4 192.168.1.1,192.168.1.2 --search4 example.local
|
||||||
|
```
|
||||||
|
|
||||||
|
This command configures the network settings in the QCOW2 image with:
|
||||||
|
- Static IPv4 configuration:
|
||||||
|
- IP Address: `192.168.1.10/24`
|
||||||
|
- Gateway: `192.168.1.1`
|
||||||
|
- DNS Servers: `192.168.1.1`, `192.168.1.2`
|
||||||
|
- DNS Search Domain: `example.local`
|
||||||
|
|
||||||
|
2. **DHCP Configuration:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --dhcp4
|
||||||
|
```
|
||||||
|
|
||||||
|
This command configures the network interface to use DHCP for automatic IP address assignment.
|
||||||
|
|
||||||
|
3. **Static IP Configuration without DNS Settings:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --static4 \
|
||||||
|
--ip4 192.168.1.20/24 --gw4 192.168.1.1
|
||||||
|
```
|
||||||
|
|
||||||
|
This command sets only the basic static IP configuration:
|
||||||
|
- IP Address: `192.168.1.20/24`
|
||||||
|
- Gateway: `192.168.1.1`
|
||||||
|
|
||||||
|
**Notes:**
|
||||||
|
|
||||||
|
- When using `--static4`, both `--ip4` and `--gw4` options are required.
|
||||||
|
- The script validates IP addresses, DNS servers, and interface names before making any changes.
|
||||||
|
- DNS servers can be specified as a comma-separated list for multiple servers.
|
||||||
|
- The script requires write permissions for the QCOW2 image file.
|
||||||
|
- Interface names must contain only alphanumeric characters, underscores, and hyphens.
|
||||||
|
|
||||||
|
**Description:**
|
||||||
|
|
||||||
|
The `so-qcow2-modify-network` script modifies network configuration within a QCOW2 image using the following process:
|
||||||
|
|
||||||
|
1. **Image Access:**
|
||||||
|
- Mounts the QCOW2 image using libguestfs
|
||||||
|
- Locates and accesses the NetworkManager configuration directory
|
||||||
|
|
||||||
|
2. **Configuration Update:**
|
||||||
|
- Reads the existing network configuration for the specified interface
|
||||||
|
- Updates IPv4 settings based on provided parameters
|
||||||
|
- Supports both DHCP and static IP configurations
|
||||||
|
- Validates all input parameters before making changes
|
||||||
|
|
||||||
|
3. **File Management:**
|
||||||
|
- Creates or updates the NetworkManager connection file
|
||||||
|
- Maintains proper file permissions and format
|
||||||
|
- Safely unmounts the image after changes
|
||||||
|
|
||||||
|
**Exit Codes:**
|
||||||
|
|
||||||
|
- `0`: Success
|
||||||
|
- Non-zero: An error occurred during execution
|
||||||
|
|
||||||
|
**Logging:**
|
||||||
|
|
||||||
|
- Logs are written to `/opt/so/log/hypervisor/so-qcow2-modify-network.log`
|
||||||
|
- Both file and console logging are enabled for real-time monitoring
|
||||||
|
- Log entries include:
|
||||||
|
- Timestamps in ISO 8601 format
|
||||||
|
- Severity levels (INFO, WARNING, ERROR)
|
||||||
|
- Detailed error messages for troubleshooting
|
||||||
|
- Critical operations logged:
|
||||||
|
- Network configuration changes
|
||||||
|
- Image mount/unmount operations
|
||||||
|
- Validation failures
|
||||||
|
- File access errors
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import guestfs
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import ipaddress
|
||||||
|
import configparser
|
||||||
|
import uuid
|
||||||
|
from io import StringIO
|
||||||
|
import libvirt
|
||||||
|
from so_logging_utils import setup_logging
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
# Get hypervisor name from local hostname
|
||||||
|
HYPERVISOR = socket.gethostname()
|
||||||
|
|
||||||
|
# Custom log handler to capture output
|
||||||
|
class StringIOHandler(logging.Handler):
    """In-memory logging handler: collects formatted records in a StringIO."""

    def __init__(self):
        super().__init__()
        self.strio = StringIO()

    def emit(self, record):
        # Format per the attached Formatter, then buffer with a trailing newline.
        formatted = self.format(record)
        self.strio.write(formatted + '\n')

    def get_value(self):
        """Return the accumulated log text."""
        return self.strio.getvalue()
|
||||||
|
|
||||||
|
# Set up logging using the so_logging_utils library.
# Records are teed to two destinations: the hypervisor log file (configured
# by setup_logging) and an in-memory StringIO buffer (string_handler) so the
# full log text can be retrieved programmatically after the run.
string_handler = StringIOHandler()
string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger = setup_logging(
    logger_name='so-qcow2-modify-network',
    log_file_path='/opt/so/log/hypervisor/so-qcow2-modify-network.log',
    log_level=logging.INFO,
    format_str='%(asctime)s - %(levelname)s - %(message)s'
)
logger.addHandler(string_handler)

# Directory inside the guest image where NetworkManager keyfile connection
# profiles are stored.
NETWORK_CONFIG_DIR = "/etc/NetworkManager/system-connections"
|
||||||
|
|
||||||
|
def validate_ip_address(ip_str, description="IP address"):
    """
    Validate an IPv4 address, optionally with a prefix (e.g. '10.0.0.1/24').

    Parameters:
        ip_str (str): The address string to check.
        description (str): Label used in the error message.

    Raises:
        ValueError: If ip_str is not a valid IPv4 address or interface.
    """
    try:
        # IPv4Interface accepts both bare addresses ('10.0.0.1') and
        # prefixed ones ('10.0.0.1/24'), so the original's separate
        # IPv4Address fallback was dead code and has been removed.
        ipaddress.IPv4Interface(ip_str)
    except ValueError:
        raise ValueError(f"Invalid {description}: {ip_str}")
|
||||||
|
|
||||||
|
def validate_dns_addresses(dns_str):
    """Validate a comma-separated list of IPv4 DNS server addresses.

    Raises:
        ValueError: If any entry is not a valid IPv4 address.
    """
    # Whitespace around each comma-separated entry is tolerated.
    for entry in dns_str.split(','):
        validate_ip_address(entry.strip(), description="DNS server address")
|
||||||
|
|
||||||
|
def validate_interface_name(interface_name):
    """Reject interface names containing anything outside [A-Za-z0-9_-].

    Raises:
        ValueError: If the name is empty or has disallowed characters.
    """
    if re.fullmatch(r'[a-zA-Z0-9_\-]+', interface_name) is None:
        raise ValueError(f"Invalid interface name: {interface_name}")
|
||||||
|
|
||||||
|
def check_base_domain_status(image_path):
    """
    Check if the base domain corresponding to the image path is currently running.
    Base domains should not be running when modifying their configuration.

    Parameters:
        image_path (str): Path to the QCOW2 image.

    Returns:
        bool: True if the base domain is running, False otherwise.
    """
    # The base domain name is taken from the image's parent directory name
    # (e.g. /nsm/libvirt/images/<base_domain>/<image>.qcow2).
    base_domain = os.path.basename(os.path.dirname(image_path))
    logger.info(f"Verifying base domain status for image: {image_path}")
    logger.info(f"Checking if base domain '{base_domain}' is running...")

    try:
        conn = libvirt.open('qemu:///system')
        try:
            dom = conn.lookupByName(base_domain)
            is_running = dom.isActive()
            if is_running:
                logger.error(f"Base domain '{base_domain}' is running - cannot modify configuration")
            return is_running
        except libvirt.libvirtError:
            # lookupByName raising is treated as "domain not defined here";
            # that is not an error for this check.
            logger.info(f"Base domain '{base_domain}' not found or not running")
            return False
        finally:
            # Always release the connection, whichever return path is taken.
            conn.close()
    except libvirt.libvirtError as e:
        # NOTE(review): reaching here normally means libvirt.open failed, but
        # a libvirtError escaping conn.close() in the finally block would
        # also land here and be logged as a connection failure.
        logger.error(f"Failed to connect to libvirt: {e}")
        return False
|
||||||
|
|
||||||
|
def update_network_config(content, mode, ip=None, gateway=None, dns=None, search_domain=None):
    """
    Rewrite a NetworkManager keyfile's connection/IPv4 settings and return it.

    Parameters:
        content (str): Existing keyfile content to update.
        mode (str): Either 'dhcp4' (automatic addressing) or 'static4'.
        ip (str | None): IPv4 address with prefix; required for 'static4'.
        gateway (str | None): IPv4 gateway; required for 'static4'.
        dns (str | None): Comma-separated IPv4 DNS servers ('static4' only).
        search_domain (str | None): DNS search domain ('static4' only).

    Returns:
        str: The updated keyfile content.

    Raises:
        ValueError: On an unknown mode, or 'static4' without ip and gateway.
    """
    # strict=False tolerates duplicate sections/options in existing keyfiles;
    # overriding optionxform preserves option-name case (keyfiles are
    # case-sensitive, while configparser lowercases by default).
    config = configparser.ConfigParser(strict=False)
    config.optionxform = str
    config.read_string(content)

    # Ensure connection section exists and set required properties
    if 'connection' not in config.sections():
        logger.info("Creating new connection section in network configuration")
        config.add_section('connection')

    # Set mandatory connection properties
    config.set('connection', 'autoconnect', 'yes')
    config.set('connection', 'autoconnect-priority', '999')
    config.set('connection', 'autoconnect-retries', '-1')
    config.set('connection', 'multi-connect', '0')
    config.set('connection', 'wait-device-timeout', '-1')

    # Ensure ipv4 section exists
    if 'ipv4' not in config.sections():
        logger.info("Creating new IPv4 section in network configuration")
        config.add_section('ipv4')

    if mode == "dhcp4":
        logger.info("Configuring DHCP settings:")
        logger.info(" method: auto (DHCP enabled)")
        logger.info(" Removing any existing static configuration")
        config.set('ipv4', 'method', 'auto')
        # remove_option is a no-op (returns False) when the key is absent.
        config.remove_option('ipv4', 'address1')
        config.remove_option('ipv4', 'addresses')
        config.remove_option('ipv4', 'dns')
        config.remove_option('ipv4', 'dns-search')
    elif mode == "static4":
        logger.info("Configuring static IP settings:")
        logger.info(" method: manual (static configuration)")
        config.set('ipv4', 'method', 'manual')
        if ip and gateway:
            logger.info(f" Setting address: {ip}")
            logger.info(f" Setting gateway: {gateway}")
            config.set('ipv4', 'address1', f"{ip},{gateway}")
        else:
            logger.error("Missing required IP address or gateway for static configuration")
            raise ValueError("Both IP address and gateway are required for static configuration.")
        if dns:
            logger.info(f" Setting DNS servers: {dns}")
            # NOTE(review): a comma-separated dns value is written verbatim as
            # "a,b;" — NetworkManager keyfiles conventionally separate multiple
            # DNS entries with ';'. Verify multi-server handling downstream.
            config.set('ipv4', 'dns', f"{dns};")
        else:
            logger.info(" No DNS servers specified")
            config.remove_option('ipv4', 'dns')
        if search_domain:
            logger.info(f" Setting search domain: {search_domain}")
            config.set('ipv4', 'dns-search', f"{search_domain};")
        else:
            logger.info(" No search domain specified")
            config.remove_option('ipv4', 'dns-search')
    else:
        raise ValueError(f"Invalid mode '{mode}'. Expected 'dhcp4' or 'static4'.")

    # Serialize back to keyfile text; space_around_delimiters=False keeps the
    # 'key=value' form NetworkManager expects (no spaces around '=').
    output = StringIO()
    config.write(output, space_around_delimiters=False)
    updated_content = output.getvalue()
    output.close()

    return updated_content
|
||||||
|
|
||||||
|
def modify_network_config(image_path, interface, mode, ip=None, gateway=None, dns=None, search_domain=None):
|
||||||
|
"""
|
||||||
|
Modifies network configuration in a QCOW2 image, ensuring specific connection settings are set.
|
||||||
|
|
||||||
|
Handles both eth0 and predictable network interface names (e.g., enp1s0).
|
||||||
|
If the requested interface configuration is not found but eth0.nmconnection exists,
|
||||||
|
it will be renamed and updated with the proper interface configuration.
|
||||||
|
"""
|
||||||
|
# Check if base domain is running
|
||||||
|
if check_base_domain_status(image_path):
|
||||||
|
raise RuntimeError("Cannot modify network configuration while base domain is running")
|
||||||
|
|
||||||
|
if not os.access(image_path, os.W_OK):
|
||||||
|
logger.error(f"Permission denied: Cannot write to image file {image_path}")
|
||||||
|
raise PermissionError(f"Write permission denied for image file: {image_path}")
|
||||||
|
|
||||||
|
logger.info(f"Configuring network for VM image: {image_path}")
|
||||||
|
logger.info(f"Network configuration details for interface {interface}:")
|
||||||
|
logger.info(f" Mode: {mode.upper()}")
|
||||||
|
if mode == "static4":
|
||||||
|
logger.info(f" IP Address: {ip}")
|
||||||
|
logger.info(f" Gateway: {gateway}")
|
||||||
|
logger.info(f" DNS Servers: {dns if dns else 'Not configured'}")
|
||||||
|
logger.info(f" Search Domain: {search_domain if search_domain else 'Not configured'}")
|
||||||
|
|
||||||
|
g = guestfs.GuestFS(python_return_dict=True)
|
||||||
|
try:
|
||||||
|
logger.info("Initializing GuestFS and mounting image...")
|
||||||
|
g.set_network(False)
|
||||||
|
g.selinux = False
|
||||||
|
g.add_drive_opts(image_path, format="qcow2")
|
||||||
|
g.launch()
|
||||||
|
except RuntimeError as e:
|
||||||
|
logger.error(f"Failed to initialize GuestFS: {e}")
|
||||||
|
raise RuntimeError(f"Failed to initialize GuestFS or launch appliance: {e}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
os_list = g.inspect_os()
|
||||||
|
if not os_list:
|
||||||
|
logger.error(f"No operating system found in image: {image_path}")
|
||||||
|
raise RuntimeError(f"Unable to find any OS in {image_path}.")
|
||||||
|
|
||||||
|
root_fs = os_list[0]
|
||||||
|
try:
|
||||||
|
g.mount(root_fs, "/")
|
||||||
|
logger.info("Successfully mounted VM image filesystem")
|
||||||
|
except RuntimeError as e:
|
||||||
|
logger.error(f"Failed to mount filesystem: {e}")
|
||||||
|
raise RuntimeError(f"Failed to mount the filesystem: {e}")
|
||||||
|
|
||||||
|
if not g.is_dir(NETWORK_CONFIG_DIR):
|
||||||
|
logger.error(f"NetworkManager configuration directory not found: {NETWORK_CONFIG_DIR}")
|
||||||
|
raise FileNotFoundError(f"NetworkManager configuration directory not found in the image at {NETWORK_CONFIG_DIR}.")
|
||||||
|
|
||||||
|
requested_config_path = f"{NETWORK_CONFIG_DIR}/{interface}.nmconnection"
|
||||||
|
eth0_config_path = f"{NETWORK_CONFIG_DIR}/eth0.nmconnection"
|
||||||
|
config_file_path = None
|
||||||
|
current_content = None
|
||||||
|
|
||||||
|
# Try to read the requested interface config first
|
||||||
|
try:
|
||||||
|
file_content = g.read_file(requested_config_path)
|
||||||
|
current_content = file_content.decode('utf-8')
|
||||||
|
config_file_path = requested_config_path
|
||||||
|
logger.info(f"Found existing network configuration for interface {interface}")
|
||||||
|
except RuntimeError:
|
||||||
|
# If not found, try eth0 config
|
||||||
|
try:
|
||||||
|
file_content = g.read_file(eth0_config_path)
|
||||||
|
current_content = file_content.decode('utf-8')
|
||||||
|
config_file_path = eth0_config_path
|
||||||
|
logger.info("Found eth0 network configuration, will update for new interface")
|
||||||
|
except RuntimeError:
|
||||||
|
logger.error(f"No network configuration found for either {interface} or eth0")
|
||||||
|
raise FileNotFoundError(f"No network configuration found at {requested_config_path} or {eth0_config_path}")
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
logger.error(f"Failed to decode network configuration file")
|
||||||
|
raise ValueError(f"Failed to decode the configuration file")
|
||||||
|
|
||||||
|
# If using eth0 config, update interface-specific fields
|
||||||
|
if config_file_path == eth0_config_path:
|
||||||
|
config = configparser.ConfigParser(strict=False)
|
||||||
|
config.optionxform = str
|
||||||
|
config.read_string(current_content)
|
||||||
|
|
||||||
|
if 'connection' not in config.sections():
|
||||||
|
config.add_section('connection')
|
||||||
|
|
||||||
|
# Update interface-specific fields
|
||||||
|
config.set('connection', 'id', interface)
|
||||||
|
config.set('connection', 'interface-name', interface)
|
||||||
|
config.set('connection', 'uuid', str(uuid.uuid4()))
|
||||||
|
|
||||||
|
# Write updated content back to string
|
||||||
|
output = StringIO()
|
||||||
|
config.write(output, space_around_delimiters=False)
|
||||||
|
current_content = output.getvalue()
|
||||||
|
output.close()
|
||||||
|
|
||||||
|
# Update config file path to new interface name
|
||||||
|
config_file_path = requested_config_path
|
||||||
|
|
||||||
|
logger.info("Applying network configuration changes...")
|
||||||
|
updated_content = update_network_config(current_content, mode, ip, gateway, dns, search_domain)
|
||||||
|
|
||||||
|
try:
|
||||||
|
g.write(config_file_path, updated_content.encode('utf-8'))
|
||||||
|
# Set proper permissions (600) on the network configuration file
|
||||||
|
g.chmod(0o600, config_file_path)
|
||||||
|
logger.info("Successfully wrote updated network configuration with proper permissions (600)")
|
||||||
|
|
||||||
|
# If we renamed eth0 to the new interface, remove the old eth0 config
|
||||||
|
if config_file_path == requested_config_path and eth0_config_path != requested_config_path:
|
||||||
|
try:
|
||||||
|
g.rm(eth0_config_path)
|
||||||
|
logger.info("Removed old eth0 configuration file")
|
||||||
|
except RuntimeError:
|
||||||
|
logger.warning("Could not remove old eth0 configuration file - it may have already been removed")
|
||||||
|
|
||||||
|
except RuntimeError as e:
|
||||||
|
logger.error(f"Failed to write network configuration: {e}")
|
||||||
|
raise IOError(f"Failed to write updated configuration to {config_file_path}: {e}")
|
||||||
|
|
||||||
|
logger.info(f"Successfully updated network configuration:")
|
||||||
|
logger.info(f" Image: {image_path}")
|
||||||
|
logger.info(f" Interface: {interface}")
|
||||||
|
logger.info(f" Mode: {mode.upper()}")
|
||||||
|
if mode == "static4":
|
||||||
|
logger.info(f" Settings applied:")
|
||||||
|
logger.info(f" IP Address: {ip}")
|
||||||
|
logger.info(f" Gateway: {gateway}")
|
||||||
|
logger.info(f" DNS Servers: {dns if dns else 'Not configured'}")
|
||||||
|
logger.info(f" Search Domain: {search_domain if search_domain else 'Not configured'}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
raise e
|
||||||
|
finally:
|
||||||
|
g.umount_all()
|
||||||
|
g.close()
|
||||||
|
|
||||||
|
def parse_arguments():
|
||||||
|
parser = argparse.ArgumentParser(description="Modify IPv4 settings in a QCOW2 image for a specified network interface.")
|
||||||
|
parser.add_argument("-I", "--image", required=True, help="Path to the QCOW2 image.")
|
||||||
|
parser.add_argument("-i", "--interface", required=True, help="Network interface to modify (e.g., enp1s0).")
|
||||||
|
parser.add_argument("-n", "--vm-name", required=True, help="Full name of the VM (hostname_role).")
|
||||||
|
group = parser.add_mutually_exclusive_group(required=True)
|
||||||
|
group.add_argument("--dhcp4", action="store_true", help="Configure interface for DHCP (IPv4).")
|
||||||
|
group.add_argument("--static4", action="store_true", help="Configure interface for static IPv4 settings.")
|
||||||
|
parser.add_argument("--ip4", help="IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.")
|
||||||
|
parser.add_argument("--gw4", help="IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.")
|
||||||
|
parser.add_argument("--dns4", help="Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).")
|
||||||
|
parser.add_argument("--search4", help="DNS search domain for IPv4.")
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
if args.static4:
|
||||||
|
if not args.ip4 or not args.gw4:
|
||||||
|
parser.error("Both --ip4 and --gw4 are required for static IPv4 configuration.")
|
||||||
|
return args
|
||||||
|
|
||||||
|
def main():
|
||||||
|
try:
|
||||||
|
logger.info("Starting network configuration update...")
|
||||||
|
args = parse_arguments()
|
||||||
|
|
||||||
|
logger.info("Validating interface name...")
|
||||||
|
validate_interface_name(args.interface)
|
||||||
|
|
||||||
|
if args.dhcp4:
|
||||||
|
mode = "dhcp4"
|
||||||
|
logger.info("Using DHCP configuration mode")
|
||||||
|
elif args.static4:
|
||||||
|
mode = "static4"
|
||||||
|
logger.info("Using static IP configuration mode")
|
||||||
|
if not args.ip4 or not args.gw4:
|
||||||
|
logger.error("Missing required parameters for static configuration")
|
||||||
|
raise ValueError("Both --ip4 and --gw4 are required for static IPv4 configuration.")
|
||||||
|
|
||||||
|
logger.info("Validating IP addresses...")
|
||||||
|
validate_ip_address(args.ip4, description="IPv4 address")
|
||||||
|
validate_ip_address(args.gw4, description="IPv4 gateway")
|
||||||
|
if args.dns4:
|
||||||
|
validate_dns_addresses(args.dns4)
|
||||||
|
else:
|
||||||
|
logger.error("No configuration mode specified")
|
||||||
|
raise ValueError("Either --dhcp4 or --static4 must be specified.")
|
||||||
|
|
||||||
|
modify_network_config(args.image, args.interface, mode, args.ip4, args.gw4, args.dns4, args.search4)
|
||||||
|
logger.info("Network configuration update completed successfully")
|
||||||
|
|
||||||
|
# Send success status event
|
||||||
|
try:
|
||||||
|
subprocess.run([
|
||||||
|
'so-salt-emit-vm-deployment-status-event',
|
||||||
|
'-v', args.vm_name,
|
||||||
|
'-H', HYPERVISOR,
|
||||||
|
'-s', 'IP Configuration'
|
||||||
|
], check=True)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logger.error(f"Failed to emit success status event: {e}")
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
error_msg = "Operation cancelled by user"
|
||||||
|
logger.error(error_msg)
|
||||||
|
try:
|
||||||
|
subprocess.run([
|
||||||
|
'so-salt-emit-vm-deployment-status-event',
|
||||||
|
'-v', args.vm_name,
|
||||||
|
'-H', HYPERVISOR,
|
||||||
|
'-s', 'IP Configuration Failed'
|
||||||
|
], check=True)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logger.error(f"Failed to emit failure status event: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
except Exception as e:
|
||||||
|
error_msg = str(e)
|
||||||
|
if "base domain is running" in error_msg:
|
||||||
|
logger.error("Cannot proceed: Base domain must not be running when modifying network configuration")
|
||||||
|
error_msg = "Base domain must not be running when modifying network configuration"
|
||||||
|
else:
|
||||||
|
logger.error(f"An error occurred: {e}")
|
||||||
|
try:
|
||||||
|
subprocess.run([
|
||||||
|
'so-salt-emit-vm-deployment-status-event',
|
||||||
|
'-v', args.vm_name,
|
||||||
|
'-H', HYPERVISOR,
|
||||||
|
'-s', 'IP Configuration Failed'
|
||||||
|
], check=True)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
logger.error(f"Failed to emit failure status event: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif -%}
|
||||||
18
salt/libvirt/64962/init.sls
Normal file
18
salt/libvirt/64962/init.sls
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
python3_lief:
|
||||||
|
pkg.installed:
|
||||||
|
- name: securityonion-python3-lief
|
||||||
|
|
||||||
|
so-fix-salt-ldap_script:
|
||||||
|
file.managed:
|
||||||
|
- name: /usr/sbin/so-fix-salt-ldap.py
|
||||||
|
- source: salt://libvirt/64962/scripts/so-fix-salt-ldap.py
|
||||||
|
- mode: 744
|
||||||
|
|
||||||
|
fix-salt-ldap:
|
||||||
|
cmd.run:
|
||||||
|
- name: /usr/sbin/so-fix-salt-ldap.py
|
||||||
|
- require:
|
||||||
|
- pkg: python3_lief
|
||||||
|
- file: so-fix-salt-ldap_script
|
||||||
|
- onchanges:
|
||||||
|
- file: so-fix-salt-ldap_script
|
||||||
79
salt/libvirt/64962/scripts/so-fix-salt-ldap.py
Normal file
79
salt/libvirt/64962/scripts/so-fix-salt-ldap.py
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# this script comes from the user nf-brentsaner located here https://github.com/saltstack/salt/issues/64962
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import grp
|
||||||
|
import os
|
||||||
|
import pathlib
|
||||||
|
import pwd
|
||||||
|
import shutil
|
||||||
|
##
|
||||||
|
import dbus # dnf -y install python3-dbus
|
||||||
|
##
|
||||||
|
import lief # https://pypi.org/project/lief/
|
||||||
|
|
||||||
|
salt_root = pathlib.Path('/opt/saltstack')
|
||||||
|
src_lib = pathlib.Path('/lib64/libldap.so.2')
|
||||||
|
dst_lib = salt_root.joinpath('salt', 'lib', 'libldap.so.2')
|
||||||
|
|
||||||
|
uname = 'salt'
|
||||||
|
gname = 'salt'
|
||||||
|
|
||||||
|
lib = lief.parse(str(src_lib))
|
||||||
|
|
||||||
|
sym = next((i for i in lib.imported_symbols if i.name == 'EVP_md2'), None)
|
||||||
|
|
||||||
|
if sym:
|
||||||
|
# Get the Salt services from DBus.
|
||||||
|
sysbus = dbus.SystemBus()
|
||||||
|
sysd = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
|
||||||
|
mgr = dbus.Interface(sysd, 'org.freedesktop.systemd1.Manager')
|
||||||
|
svcs = []
|
||||||
|
for i in mgr.ListUnits():
|
||||||
|
# first element is unit name.
|
||||||
|
if not str(i[0]).startswith('salt-'):
|
||||||
|
continue
|
||||||
|
svc = sysbus.get_object('org.freedesktop.systemd1', object_path = mgr.GetUnit(str(i[0])))
|
||||||
|
props = dbus.Interface(svc, dbus_interface = 'org.freedesktop.DBus.Properties')
|
||||||
|
state = props.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
|
||||||
|
if str(state) == 'active':
|
||||||
|
svcs.append(i[0])
|
||||||
|
# Get the user/group
|
||||||
|
u = pwd.getpwnam(uname)
|
||||||
|
g = grp.getgrnam(gname)
|
||||||
|
# Modify
|
||||||
|
print('Modifications necessary.')
|
||||||
|
if svcs:
|
||||||
|
# Stop the services first.
|
||||||
|
for sn in svcs:
|
||||||
|
mgr.StopUnit(sn, 'replace')
|
||||||
|
if dst_lib.exists():
|
||||||
|
# 3.10 deprecated .utcnow().
|
||||||
|
#dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.now(datetime.UTC).timestamp()))
|
||||||
|
dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.utcnow().timestamp()))
|
||||||
|
os.rename(dst_lib, dst_lib_bak)
|
||||||
|
print('Destination file {0} exists; backed up to {1}.'.format(dst_lib, dst_lib_bak))
|
||||||
|
lib.remove_dynamic_symbol(sym)
|
||||||
|
lib.write(str(dst_lib))
|
||||||
|
os.chown(dst_lib, u.pw_uid, g.gr_gid)
|
||||||
|
os.chmod(dst_lib, src_lib.stat().st_mode)
|
||||||
|
# Before we restart services, we also want to remove any python caches.
|
||||||
|
for root, dirs, files in os.walk(salt_root):
|
||||||
|
for f in files:
|
||||||
|
if f.lower().endswith('.pyc'):
|
||||||
|
fpath = os.path.join(root, f)
|
||||||
|
os.remove(fpath)
|
||||||
|
print('Removed file {0}'.format(fpath))
|
||||||
|
if '__pycache__' in dirs:
|
||||||
|
dpath = os.path.join(root, '__pycache__')
|
||||||
|
shutil.rmtree(dpath)
|
||||||
|
print('Removed directory {0}'.format(dpath))
|
||||||
|
# And then start the units that were started before.
|
||||||
|
if svcs:
|
||||||
|
for sn in svcs:
|
||||||
|
mgr.RestartUnit(sn, 'replace')
|
||||||
|
else:
|
||||||
|
print('No EVP_md2 symbol found in the library. No modifications needed.')
|
||||||
|
|
||||||
|
print('Done.')
|
||||||
49
salt/libvirt/bridge.sls
Normal file
49
salt/libvirt/bridge.sls
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'libvirt/map.jinja' import LIBVIRTMERGED %}
|
||||||
|
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
|
||||||
|
|
||||||
|
down_original_mgmt_interface:
|
||||||
|
cmd.run:
|
||||||
|
- name: "nmcli con down {{ pillar.host.mainint }}"
|
||||||
|
- unless:
|
||||||
|
- nmcli -f GENERAL.CONNECTION dev show {{ pillar.host.mainint }} | grep bridge-slave-{{ pillar.host.mainint }}
|
||||||
|
- order: last
|
||||||
|
|
||||||
|
wait_for_br0_ip:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
counter=0
|
||||||
|
until ip addr show br0 | grep -q "inet "; do
|
||||||
|
sleep 1
|
||||||
|
counter=$((counter+1))
|
||||||
|
if [ $counter -ge 90 ]; then
|
||||||
|
echo "Timeout waiting for br0 to get an IP address"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo "br0 has IP address: $(ip addr show br0 | grep 'inet ' | awk '{print $2}')"
|
||||||
|
- timeout: 95
|
||||||
|
- onchanges:
|
||||||
|
- cmd: down_original_mgmt_interface
|
||||||
|
|
||||||
|
update_mine_functions:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/minion.d/mine_functions.conf
|
||||||
|
- contents: |
|
||||||
|
mine_interval: 25
|
||||||
|
mine_functions:
|
||||||
|
network.ip_addrs:
|
||||||
|
- interface: br0
|
||||||
|
- onchanges:
|
||||||
|
- cmd: wait_for_br0_ip
|
||||||
|
|
||||||
|
restart_salt_minion_service:
|
||||||
|
service.running:
|
||||||
|
- name: salt-minion
|
||||||
|
- enable: True
|
||||||
|
- listen:
|
||||||
|
- file: update_mine_functions
|
||||||
53
salt/libvirt/defaults.yaml
Normal file
53
salt/libvirt/defaults.yaml
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
libvirt:
|
||||||
|
config:
|
||||||
|
listen_tls: 1
|
||||||
|
listen_tcp: 0
|
||||||
|
tls_port: "16514"
|
||||||
|
tcp_port: "16509"
|
||||||
|
listen_addr: "0.0.0.0"
|
||||||
|
unix_sock_group: "root"
|
||||||
|
unix_sock_ro_perms: "0777"
|
||||||
|
unix_sock_rw_perms: "0770"
|
||||||
|
unix_sock_admin_perms: "0700"
|
||||||
|
unix_sock_dir: "/run/libvirt"
|
||||||
|
auth_unix_ro: "polkit"
|
||||||
|
auth_unix_rw: "polkit"
|
||||||
|
auth_tcp: "sasl"
|
||||||
|
auth_tls: "none"
|
||||||
|
tcp_min_ssf: 112
|
||||||
|
access_drivers: ["polkit"]
|
||||||
|
key_file: "/etc/pki/libvirt/private/serverkey.pem"
|
||||||
|
cert_file: "/etc/pki/libvirt/servercert.pem"
|
||||||
|
ca_file: "/etc/pki/CA/cacert.pem"
|
||||||
|
#crl_file: "/etc/pki/CA/crl.pem"
|
||||||
|
tls_no_sanity_certificate: 0
|
||||||
|
tls_no_verify_certificate: 0
|
||||||
|
tls_allowed_dn_list: ["DN1", "DN2"]
|
||||||
|
tls_priority: "NORMAL"
|
||||||
|
sasl_allowed_username_list: ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM"]
|
||||||
|
max_clients: 5000
|
||||||
|
max_queued_clients: 1000
|
||||||
|
max_anonymous_clients: 20
|
||||||
|
min_workers: 5
|
||||||
|
max_workers: 20
|
||||||
|
prio_workers: 5
|
||||||
|
max_client_requests: 5
|
||||||
|
admin_min_workers: 1
|
||||||
|
admin_max_workers: 5
|
||||||
|
admin_max_clients: 5
|
||||||
|
admin_max_queued_clients: 5
|
||||||
|
admin_max_client_requests: 5
|
||||||
|
log_level: 3
|
||||||
|
log_filters: "1:qemu 1:libvirt 4:object 4:json 4:event 1:util"
|
||||||
|
log_outputs: "3:syslog:libvirtd"
|
||||||
|
audit_level: 2
|
||||||
|
audit_logging: 1
|
||||||
|
#host_uuid: "00000000-0000-0000-0000-000000000000"
|
||||||
|
host_uuid_source: "smbios"
|
||||||
|
keepalive_interval: 5
|
||||||
|
keepalive_count: 5
|
||||||
|
keepalive_required: 1
|
||||||
|
admin_keepalive_required: 1
|
||||||
|
admin_keepalive_interval: 5
|
||||||
|
admin_keepalive_count: 5
|
||||||
|
ovs_timeout: 5
|
||||||
536
salt/libvirt/etc/libvirtd.conf
Normal file
536
salt/libvirt/etc/libvirtd.conf
Normal file
@@ -0,0 +1,536 @@
|
|||||||
|
# Master libvirt daemon configuration file
|
||||||
|
#
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# Network connectivity controls
|
||||||
|
#
|
||||||
|
|
||||||
|
# Flag listening for secure TLS connections on the public TCP/IP port.
|
||||||
|
#
|
||||||
|
# To enable listening sockets with the 'libvirtd' daemon it's also required to
|
||||||
|
# pass the '--listen' flag on the commandline of the daemon.
|
||||||
|
# This is not needed with 'virtproxyd'.
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# It is necessary to setup a CA and issue server certificates before
|
||||||
|
# using this capability.
|
||||||
|
#
|
||||||
|
# This is enabled by default, uncomment this to disable it
|
||||||
|
#listen_tls = 0
|
||||||
|
|
||||||
|
# Listen for unencrypted TCP connections on the public TCP/IP port.
|
||||||
|
#
|
||||||
|
# To enable listening sockets with the 'libvirtd' daemon it's also required to
|
||||||
|
# pass the '--listen' flag on the commandline of the daemon.
|
||||||
|
# This is not needed with 'virtproxyd'.
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# Using the TCP socket requires SASL authentication by default. Only
|
||||||
|
# SASL mechanisms which support data encryption are allowed. This is
|
||||||
|
# DIGEST_MD5 and GSSAPI (Kerberos5)
|
||||||
|
#
|
||||||
|
# This is disabled by default, uncomment this to enable it.
|
||||||
|
#listen_tcp = 1
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Override the port for accepting secure TLS connections
|
||||||
|
# This can be a port number, or service name
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
#tls_port = "16514"
|
||||||
|
|
||||||
|
# Override the port for accepting insecure TCP connections
|
||||||
|
# This can be a port number, or service name
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
#tcp_port = "16509"
|
||||||
|
|
||||||
|
|
||||||
|
# Override the default configuration which binds to all network
|
||||||
|
# interfaces. This can be a numeric IPv4/6 address, or hostname
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# If the libvirtd service is started in parallel with network
|
||||||
|
# startup (e.g. with systemd), binding to addresses other than
|
||||||
|
# the wildcards (0.0.0.0/::) might not be available yet.
|
||||||
|
#
|
||||||
|
#listen_addr = "192.168.0.1"
|
||||||
|
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# UNIX socket access controls
|
||||||
|
#
|
||||||
|
|
||||||
|
# Set the UNIX domain socket group ownership. This can be used to
|
||||||
|
# allow a 'trusted' set of users access to management capabilities
|
||||||
|
# without becoming root.
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# This is restricted to 'root' by default.
|
||||||
|
#unix_sock_group = "libvirt"
|
||||||
|
|
||||||
|
# Set the UNIX socket permissions for the R/O socket. This is used
|
||||||
|
# for monitoring VM status only
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# Default allows any user. If setting group ownership, you may want to
|
||||||
|
# restrict this too.
|
||||||
|
#unix_sock_ro_perms = "0777"
|
||||||
|
|
||||||
|
# Set the UNIX socket permissions for the R/W socket. This is used
|
||||||
|
# for full management of VMs
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# Default allows only root. If PolicyKit is enabled on the socket,
|
||||||
|
# the default will change to allow everyone (eg, 0777)
|
||||||
|
#
|
||||||
|
# If not using PolicyKit and setting group ownership for access
|
||||||
|
# control, then you may want to relax this too.
|
||||||
|
#unix_sock_rw_perms = "0770"
|
||||||
|
|
||||||
|
# Set the UNIX socket permissions for the admin interface socket.
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
# Default allows only owner (root), do not change it unless you are
|
||||||
|
# sure to whom you are exposing the access to.
|
||||||
|
#unix_sock_admin_perms = "0700"
|
||||||
|
|
||||||
|
# Set the name of the directory in which sockets will be found/created.
|
||||||
|
#
|
||||||
|
# This setting is not required or honoured if using systemd socket
|
||||||
|
# activation.
|
||||||
|
#
|
||||||
|
#unix_sock_dir = "/run/libvirt"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# Authentication.
|
||||||
|
#
|
||||||
|
# There are the following choices available:
|
||||||
|
#
|
||||||
|
# - none: do not perform auth checks. If you can connect to the
|
||||||
|
# socket you are allowed. This is suitable if there are
|
||||||
|
# restrictions on connecting to the socket (eg, UNIX
|
||||||
|
# socket permissions), or if there is a lower layer in
|
||||||
|
# the network providing auth (eg, TLS/x509 certificates)
|
||||||
|
#
|
||||||
|
# - sasl: use SASL infrastructure. The actual auth scheme is then
|
||||||
|
# controlled from /etc/sasl2/libvirt.conf. For the TCP
|
||||||
|
# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
|
||||||
|
# For non-TCP or TLS sockets, any scheme is allowed.
|
||||||
|
#
|
||||||
|
# - polkit: use PolicyKit to authenticate. This is only suitable
|
||||||
|
# for use on the UNIX sockets. The default policy will
|
||||||
|
# require a user to supply their own password to gain
|
||||||
|
# full read/write access (aka sudo like), while anyone
|
||||||
|
# is allowed read/only access.
|
||||||
|
#
|
||||||
|
|
||||||
|
# Set an authentication scheme for UNIX read-only sockets
|
||||||
|
#
|
||||||
|
# By default socket permissions allow anyone to connect
|
||||||
|
#
|
||||||
|
# If libvirt was compiled without support for 'polkit', then
|
||||||
|
# no access control checks are done, but libvirt still only
|
||||||
|
# allows execution of APIs which don't change state.
|
||||||
|
#
|
||||||
|
# If libvirt was compiled with support for 'polkit', then
|
||||||
|
# the libvirt socket will perform a check with polkit after
|
||||||
|
# connections. The default policy still allows any local
|
||||||
|
# user access.
|
||||||
|
#
|
||||||
|
# To restrict monitoring of domains you may wish to either
|
||||||
|
# enable 'sasl' here, or change the polkit policy definition.
|
||||||
|
#auth_unix_ro = "polkit"
|
||||||
|
|
||||||
|
# Set an authentication scheme for UNIX read-write sockets.
|
||||||
|
#
|
||||||
|
# If libvirt was compiled without support for 'polkit', then
|
||||||
|
# the systemd .socket files will use SocketMode=0600 by default
|
||||||
|
# thus only allowing root user to connect, and 'auth_unix_rw'
|
||||||
|
# will default to 'none'.
|
||||||
|
#
|
||||||
|
# If libvirt was compiled with support for 'polkit', then
|
||||||
|
# the systemd .socket files will use SocketMode=0666 which
|
||||||
|
# allows any user to connect and 'auth_unix_rw' will default
|
||||||
|
# to 'polkit'. If you disable use of 'polkit' here, then it
|
||||||
|
# is essential to change the systemd SocketMode parameter
|
||||||
|
# back to 0600, to avoid an insecure configuration.
|
||||||
|
#
|
||||||
|
#auth_unix_rw = "polkit"
|
||||||
|
|
||||||
|
# Change the authentication scheme for TCP sockets.
|
||||||
|
#
|
||||||
|
# If you don't enable SASL, then all TCP traffic is cleartext.
|
||||||
|
# Don't do this outside of a dev/test scenario. For real world
|
||||||
|
# use, always enable SASL and use the GSSAPI or DIGEST-MD5
|
||||||
|
# mechanism in /etc/sasl2/libvirt.conf
|
||||||
|
#auth_tcp = "sasl"
|
||||||
|
|
||||||
|
# Change the authentication scheme for TLS sockets.
|
||||||
|
#
|
||||||
|
# TLS sockets already have encryption provided by the TLS
|
||||||
|
# layer, and limited authentication is done by certificates
|
||||||
|
#
|
||||||
|
# It is possible to make use of any SASL authentication
|
||||||
|
# mechanism as well, by using 'sasl' for this option
|
||||||
|
#auth_tls = "none"
|
||||||
|
|
||||||
|
# Enforce a minimum SSF value for TCP sockets
|
||||||
|
#
|
||||||
|
# The default minimum is currently 56 (single-DES) which will
|
||||||
|
# be raised to 112 in the future.
|
||||||
|
#
|
||||||
|
# This option can be used to set values higher than 112
|
||||||
|
#tcp_min_ssf = 112
|
||||||
|
|
||||||
|
|
||||||
|
# Change the API access control scheme
|
||||||
|
#
|
||||||
|
# By default an authenticated user is allowed access
|
||||||
|
# to all APIs. Access drivers can place restrictions
|
||||||
|
# on this. By default the 'nop' driver is enabled,
|
||||||
|
# meaning no access control checks are done once a
|
||||||
|
# client has authenticated with libvirtd
|
||||||
|
#
|
||||||
|
#access_drivers = [ "polkit" ]
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# TLS x509 certificate configuration
|
||||||
|
#
|
||||||
|
|
||||||
|
# Use of TLS requires that x509 certificates be issued. The default locations
|
||||||
|
# for the certificate files is as follows:
|
||||||
|
#
|
||||||
|
# /etc/pki/CA/cacert.pem - The CA master certificate
|
||||||
|
# /etc/pki/libvirt/servercert.pem - The server certificate signed by cacert.pem
|
||||||
|
# /etc/pki/libvirt/private/serverkey.pem - The server private key
|
||||||
|
#
|
||||||
|
# It is possible to override the default locations by altering the 'key_file',
|
||||||
|
# 'cert_file', and 'ca_file' values and uncommenting them below.
|
||||||
|
#
|
||||||
|
# NB, overriding the default of one location requires uncommenting and
|
||||||
|
# possibly additionally overriding the other settings.
|
||||||
|
#
|
||||||
|
|
||||||
|
# Override the default server key file path
|
||||||
|
#
|
||||||
|
#key_file = "/etc/pki/libvirt/private/serverkey.pem"
|
||||||
|
|
||||||
|
# Override the default server certificate file path
|
||||||
|
#
|
||||||
|
#cert_file = "/etc/pki/libvirt/servercert.pem"
|
||||||
|
|
||||||
|
# Override the default CA certificate path
|
||||||
|
#
|
||||||
|
#ca_file = "/etc/pki/CA/cacert.pem"
|
||||||
|
|
||||||
|
# Specify a certificate revocation list.
|
||||||
|
#
|
||||||
|
# Defaults to not using a CRL, uncomment to enable it
|
||||||
|
#crl_file = "/etc/pki/CA/crl.pem"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# Authorization controls
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
# Flag to disable verification of our own server certificates
|
||||||
|
#
|
||||||
|
# When libvirtd starts it performs some sanity checks against
|
||||||
|
# its own certificates.
|
||||||
|
#
|
||||||
|
# Default is to always run sanity checks. Uncommenting this
|
||||||
|
# will disable sanity checks which is not a good idea
|
||||||
|
#tls_no_sanity_certificate = 1
|
||||||
|
|
||||||
|
# Flag to disable verification of client certificates
|
||||||
|
#
|
||||||
|
# Client certificate verification is the primary authentication mechanism.
|
||||||
|
# Any client which does not present a certificate signed by the CA
|
||||||
|
# will be rejected.
|
||||||
|
#
|
||||||
|
# Default is to always verify. Uncommenting this will disable
|
||||||
|
# verification.
|
||||||
|
#tls_no_verify_certificate = 1
|
||||||
|
|
||||||
|
|
||||||
|
# An access control list of allowed x509 Distinguished Names
|
||||||
|
# This list may contain wildcards such as
|
||||||
|
#
|
||||||
|
# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
|
||||||
|
#
|
||||||
|
# Any * matches any number of consecutive spaces, like a simplified glob(7).
|
||||||
|
#
|
||||||
|
# The format of the DN for a particular certificate can be queried
|
||||||
|
# using:
|
||||||
|
#
|
||||||
|
# virt-pki-query-dn clientcert.pem
|
||||||
|
#
|
||||||
|
# NB If this is an empty list, no client can connect, so comment out
|
||||||
|
# entirely rather than using empty list to disable these checks
|
||||||
|
#
|
||||||
|
# By default, no DN's are checked
|
||||||
|
#tls_allowed_dn_list = ["DN1", "DN2"]
|
||||||
|
|
||||||
|
|
||||||
|
# Override the compile time default TLS priority string. The
|
||||||
|
# default is usually "NORMAL" unless overridden at build time.
|
||||||
|
# Only set this is it is desired for libvirt to deviate from
|
||||||
|
# the global default settings.
|
||||||
|
#
|
||||||
|
#tls_priority="NORMAL"
|
||||||
|
|
||||||
|
|
||||||
|
# An access control list of allowed SASL usernames. The format for username
|
||||||
|
# depends on the SASL authentication mechanism. Kerberos usernames
|
||||||
|
# look like username@REALM
|
||||||
|
#
|
||||||
|
# This list may contain wildcards such as
|
||||||
|
#
|
||||||
|
# "*@EXAMPLE.COM"
|
||||||
|
#
|
||||||
|
# See the g_pattern_match function for the format of the wildcards.
|
||||||
|
#
|
||||||
|
# https://developer.gnome.org/glib/stable/glib-Glob-style-pattern-matching.html
|
||||||
|
#
|
||||||
|
# NB If this is an empty list, no client can connect, so comment out
|
||||||
|
# entirely rather than using empty list to disable these checks
|
||||||
|
#
|
||||||
|
# By default, no Username's are checked
|
||||||
|
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
|
||||||
|
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# Processing controls
|
||||||
|
#
|
||||||
|
|
||||||
|
# The maximum number of concurrent client connections to allow
|
||||||
|
# over all sockets combined.
|
||||||
|
#max_clients = 5000
|
||||||
|
|
||||||
|
# The maximum length of queue of connections waiting to be
|
||||||
|
# accepted by the daemon. Note, that some protocols supporting
|
||||||
|
# retransmission may obey this so that a later reattempt at
|
||||||
|
# connection succeeds.
|
||||||
|
#max_queued_clients = 1000
|
||||||
|
|
||||||
|
# The maximum length of queue of accepted but not yet
|
||||||
|
# authenticated clients. The default value is 20. Set this to
|
||||||
|
# zero to turn this feature off.
|
||||||
|
#max_anonymous_clients = 20
|
||||||
|
|
||||||
|
# The minimum limit sets the number of workers to start up
|
||||||
|
# initially. If the number of active clients exceeds this,
|
||||||
|
# then more threads are spawned, up to max_workers limit.
|
||||||
|
# Typically you'd want max_workers to equal maximum number
|
||||||
|
# of clients allowed
|
||||||
|
#min_workers = 5
|
||||||
|
#max_workers = 20
|
||||||
|
|
||||||
|
|
||||||
|
# The number of priority workers. If all workers from above
|
||||||
|
# pool are stuck, some calls marked as high priority
|
||||||
|
# (notably domainDestroy) can be executed in this pool.
|
||||||
|
#prio_workers = 5
|
||||||
|
|
||||||
|
# Limit on concurrent requests from a single client
|
||||||
|
# connection. To avoid one client monopolizing the server
|
||||||
|
# this should be a small fraction of the global max_workers
|
||||||
|
# parameter.
|
||||||
|
# Setting this too low may cause keepalive timeouts.
|
||||||
|
#max_client_requests = 5
|
||||||
|
|
||||||
|
# Same processing controls, but this time for the admin interface.
|
||||||
|
# For description of each option, be so kind to scroll few lines
|
||||||
|
# upwards.
|
||||||
|
|
||||||
|
#admin_min_workers = 1
|
||||||
|
#admin_max_workers = 5
|
||||||
|
#admin_max_clients = 5
|
||||||
|
#admin_max_queued_clients = 5
|
||||||
|
#admin_max_client_requests = 5
|
||||||
|
|
||||||
|
#################################################################
|
||||||
|
#
|
||||||
|
# Logging controls
|
||||||
|
#
|
||||||
|
|
||||||
|
# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
|
||||||
|
# basically 1 will log everything possible
|
||||||
|
#
|
||||||
|
# WARNING: USE OF THIS IS STRONGLY DISCOURAGED.
|
||||||
|
#
|
||||||
|
# WARNING: It outputs too much information to practically read.
|
||||||
|
# WARNING: The "log_filters" setting is recommended instead.
|
||||||
|
#
|
||||||
|
# WARNING: Journald applies rate limiting of messages and so libvirt
|
||||||
|
# WARNING: will limit "log_level" to only allow values 3 or 4 if
|
||||||
|
# WARNING: journald is the current output.
|
||||||
|
#
|
||||||
|
# WARNING: USE OF THIS IS STRONGLY DISCOURAGED.
|
||||||
|
#log_level = 3
|
||||||
|
|
||||||
|
# Logging filters:
|
||||||
|
# A filter allows to select a different logging level for a given category
|
||||||
|
# of logs. The format for a filter is:
|
||||||
|
#
|
||||||
|
# level:match
|
||||||
|
#
|
||||||
|
# where 'match' is a string which is matched against the category
|
||||||
|
# given in the VIR_LOG_INIT() at the top of each libvirt source
|
||||||
|
# file, e.g., "remote", "qemu", or "util.json". The 'match' in the
|
||||||
|
# filter matches using shell wildcard syntax (see 'man glob(7)').
|
||||||
|
# The 'match' is always treated as a substring match. IOW a match
|
||||||
|
# string 'foo' is equivalent to '*foo*'.
|
||||||
|
#
|
||||||
|
# 'level' is the minimal level where matching messages should
|
||||||
|
# be logged:
|
||||||
|
#
|
||||||
|
# 1: DEBUG
|
||||||
|
# 2: INFO
|
||||||
|
# 3: WARNING
|
||||||
|
# 4: ERROR
|
||||||
|
#
|
||||||
|
# Multiple filters can be defined in a single @log_filters, they just need
|
||||||
|
# to be separated by spaces. Note that libvirt performs "first" match, i.e.
|
||||||
|
# if there are concurrent filters, the first one that matches will be applied,
|
||||||
|
# given the order in @log_filters.
|
||||||
|
#
|
||||||
|
# A typical need is to capture information from a hypervisor driver,
|
||||||
|
# public API entrypoints and some of the utility code. Some utility
|
||||||
|
# code is very verbose and is generally not desired. Taking the QEMU
|
||||||
|
# hypervisor as an example, a suitable filter string for debugging
|
||||||
|
# might be to turn off object, json & event logging, but enable the
|
||||||
|
# rest of the util code:
|
||||||
|
#
|
||||||
|
#log_filters="1:qemu 1:libvirt 4:object 4:json 4:event 1:util"
|
||||||
|
|
||||||
|
# Logging outputs:
|
||||||
|
# An output is one of the places to save logging information
|
||||||
|
# The format for an output can be:
|
||||||
|
# level:stderr
|
||||||
|
# output goes to stderr
|
||||||
|
# level:syslog:name
|
||||||
|
# use syslog for the output and use the given name as the ident
|
||||||
|
# level:file:file_path
|
||||||
|
# output to a file, with the given filepath
|
||||||
|
# level:journald
|
||||||
|
# output to journald logging system
|
||||||
|
# In all cases 'level' is the minimal priority, acting as a filter
|
||||||
|
# 1: DEBUG
|
||||||
|
# 2: INFO
|
||||||
|
# 3: WARNING
|
||||||
|
# 4: ERROR
|
||||||
|
#
|
||||||
|
# Multiple outputs can be defined, they just need to be separated by spaces.
|
||||||
|
# e.g. to log all warnings and errors to syslog under the libvirtd ident:
|
||||||
|
#log_outputs="3:syslog:libvirtd"
|
||||||
|
|
||||||
|
|
||||||
|
##################################################################
|
||||||
|
#
|
||||||
|
# Auditing
|
||||||
|
#
|
||||||
|
# This setting allows usage of the auditing subsystem to be altered:
|
||||||
|
#
|
||||||
|
# audit_level == 0 -> disable all auditing
|
||||||
|
# audit_level == 1 -> enable auditing, only if enabled on host (default)
|
||||||
|
# audit_level == 2 -> enable auditing, and exit if disabled on host
|
||||||
|
#
|
||||||
|
#audit_level = 2
|
||||||
|
#
|
||||||
|
# If set to 1, then audit messages will also be sent
|
||||||
|
# via libvirt logging infrastructure. Defaults to 0
|
||||||
|
#
|
||||||
|
#audit_logging = 1
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# UUID of the host:
|
||||||
|
# Host UUID is read from one of the sources specified in host_uuid_source.
|
||||||
|
#
|
||||||
|
# - 'smbios': fetch the UUID from 'dmidecode -s system-uuid'
|
||||||
|
# - 'machine-id': fetch the UUID from /etc/machine-id
|
||||||
|
#
|
||||||
|
# The host_uuid_source default is 'smbios'. If 'dmidecode' does not provide
|
||||||
|
# a valid UUID a temporary UUID will be generated.
|
||||||
|
#
|
||||||
|
# Another option is to specify host UUID in host_uuid.
|
||||||
|
#
|
||||||
|
# Keep the format of the example UUID below. UUID must not have all digits
|
||||||
|
# be the same.
|
||||||
|
|
||||||
|
# NB This default all-zeros UUID will not work. Replace
|
||||||
|
# it with the output of the 'uuidgen' command and then
|
||||||
|
# uncomment this entry
|
||||||
|
#host_uuid = "00000000-0000-0000-0000-000000000000"
|
||||||
|
#host_uuid_source = "smbios"
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# Keepalive protocol:
|
||||||
|
# This allows libvirtd to detect broken client connections or even
|
||||||
|
# dead clients. A keepalive message is sent to a client after
|
||||||
|
# keepalive_interval seconds of inactivity to check if the client is
|
||||||
|
# still responding; keepalive_count is a maximum number of keepalive
|
||||||
|
# messages that are allowed to be sent to the client without getting
|
||||||
|
# any response before the connection is considered broken. In other
|
||||||
|
# words, the connection is automatically closed approximately after
|
||||||
|
# keepalive_interval * (keepalive_count + 1) seconds since the last
|
||||||
|
# message received from the client. If keepalive_interval is set to
|
||||||
|
# -1, libvirtd will never send keepalive requests; however clients
|
||||||
|
# can still send them and the daemon will send responses. When
|
||||||
|
# keepalive_count is set to 0, connections will be automatically
|
||||||
|
# closed after keepalive_interval seconds of inactivity without
|
||||||
|
# sending any keepalive messages.
|
||||||
|
#
|
||||||
|
#keepalive_interval = 5
|
||||||
|
#keepalive_count = 5
|
||||||
|
|
||||||
|
#
|
||||||
|
# These configuration options are no longer used. There is no way to
|
||||||
|
# restrict such clients from connecting since they first need to
|
||||||
|
# connect in order to ask for keepalive.
|
||||||
|
#
|
||||||
|
#keepalive_required = 1
|
||||||
|
#admin_keepalive_required = 1
|
||||||
|
|
||||||
|
# Keepalive settings for the admin interface
|
||||||
|
#admin_keepalive_interval = 5
|
||||||
|
#admin_keepalive_count = 5
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
# Open vSwitch:
|
||||||
|
# This allows to specify a timeout for openvswitch calls made by
|
||||||
|
# libvirt. The ovs-vsctl utility is used for the configuration and
|
||||||
|
# its timeout option is set by default to 5 seconds to avoid
|
||||||
|
# potential infinite waits blocking libvirt.
|
||||||
|
#
|
||||||
|
#ovs_timeout = 5
|
||||||
8
salt/libvirt/etc/libvirtd.conf.jinja
Normal file
8
salt/libvirt/etc/libvirtd.conf.jinja
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. -#}
|
||||||
|
|
||||||
|
{%- for k, v in LIBVIRTMERGED.config.items() %}
|
||||||
|
{{ k }} = {{ v | json }}
|
||||||
|
{%- endfor %}
|
||||||
200
salt/libvirt/images/init.sls
Normal file
200
salt/libvirt/images/init.sls
Normal file
@@ -0,0 +1,200 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- hypervisor
|
||||||
|
- libvirt.packages
|
||||||
|
|
||||||
|
nsm_libvirt_images:
|
||||||
|
file.directory:
|
||||||
|
- name: /nsm/libvirt/images/sool9
|
||||||
|
- dir_mode: 775
|
||||||
|
- file_mode: 640
|
||||||
|
- recurse:
|
||||||
|
- mode
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
# Remove hash file if image isn't present. This will allow for the image to redownload and initialize.
|
||||||
|
remove_sha256_sool9:
|
||||||
|
file.absent:
|
||||||
|
- name: /nsm/libvirt/images/sool9/sool9.sha256
|
||||||
|
- unless: test -f /nsm/libvirt/images/sool9/sool9.qcow2
|
||||||
|
|
||||||
|
# Manage SHA256 hash file
|
||||||
|
manage_sha256_sool9:
|
||||||
|
file.managed:
|
||||||
|
- name: /nsm/libvirt/images/sool9/sool9.sha256
|
||||||
|
- source: salt://libvirt/images/sool9/sool9.sha256
|
||||||
|
|
||||||
|
# Manage cloud-init files
|
||||||
|
manage_metadata_sool9:
|
||||||
|
file.managed:
|
||||||
|
- name: /nsm/libvirt/images/sool9/meta-data
|
||||||
|
- source: salt://libvirt/images/sool9/meta-data
|
||||||
|
|
||||||
|
manage_userdata_sool9:
|
||||||
|
file.managed:
|
||||||
|
- name: /nsm/libvirt/images/sool9/user-data
|
||||||
|
- source: salt://libvirt/images/sool9/user-data
|
||||||
|
|
||||||
|
# Manage qcow2 image
|
||||||
|
manage_qcow2_sool9:
|
||||||
|
file.managed:
|
||||||
|
- name: /nsm/libvirt/images/sool9/sool9.qcow2
|
||||||
|
- source: salt://libvirt/images/sool9/sool9.qcow2
|
||||||
|
- onchanges:
|
||||||
|
- file: manage_sha256_sool9
|
||||||
|
- file: manage_metadata_sool9
|
||||||
|
- file: manage_userdata_sool9
|
||||||
|
|
||||||
|
manage_cidata_sool9:
|
||||||
|
file.managed:
|
||||||
|
- name: /nsm/libvirt/images/sool9/sool9-cidata.iso
|
||||||
|
- source: salt://libvirt/images/sool9/sool9-cidata.iso
|
||||||
|
- onchanges:
|
||||||
|
- file: manage_qcow2_sool9
|
||||||
|
|
||||||
|
# Define the storage pool
|
||||||
|
define_storage_pool_sool9:
|
||||||
|
virt.pool_defined:
|
||||||
|
- name: sool9
|
||||||
|
- ptype: dir
|
||||||
|
- target: /nsm/libvirt/images/sool9
|
||||||
|
- require:
|
||||||
|
- file: manage_metadata_sool9
|
||||||
|
- file: manage_userdata_sool9
|
||||||
|
- file: manage_cidata_sool9
|
||||||
|
- cmd: libvirt_python_module
|
||||||
|
- unless:
|
||||||
|
- virsh pool-list --all | grep -q sool9
|
||||||
|
|
||||||
|
# Set pool autostart
|
||||||
|
set_pool_autostart_sool9:
|
||||||
|
cmd.run:
|
||||||
|
- name: virsh pool-autostart sool9
|
||||||
|
- require:
|
||||||
|
- virt: define_storage_pool_sool9
|
||||||
|
- unless:
|
||||||
|
- virsh pool-info sool9 | grep -q "Autostart.*yes"
|
||||||
|
|
||||||
|
# Start the storage pool
|
||||||
|
start_storage_pool_sool9:
|
||||||
|
cmd.run:
|
||||||
|
- name: virsh pool-start sool9
|
||||||
|
- require:
|
||||||
|
- virt: define_storage_pool_sool9
|
||||||
|
- cmd: libvirt_python_module
|
||||||
|
- unless:
|
||||||
|
- virsh pool-info sool9 | grep -q "State.*running"
|
||||||
|
|
||||||
|
# Stop the VM if running and base image files change
|
||||||
|
stop_vm_sool9:
|
||||||
|
module.run:
|
||||||
|
- virt.stop:
|
||||||
|
- name: sool9
|
||||||
|
- onchanges:
|
||||||
|
- file: manage_qcow2_sool9
|
||||||
|
- file: manage_metadata_sool9
|
||||||
|
- file: manage_userdata_sool9
|
||||||
|
- file: manage_cidata_sool9
|
||||||
|
- require_in:
|
||||||
|
- module: undefine_vm_sool9
|
||||||
|
- onlyif:
|
||||||
|
# Only try to stop if VM is actually running
|
||||||
|
- virsh list --state-running --name | grep -q sool9
|
||||||
|
|
||||||
|
undefine_vm_sool9:
|
||||||
|
module.run:
|
||||||
|
- virt.undefine:
|
||||||
|
- vm_: sool9
|
||||||
|
- onchanges:
|
||||||
|
- file: manage_qcow2_sool9
|
||||||
|
- file: manage_metadata_sool9
|
||||||
|
- file: manage_userdata_sool9
|
||||||
|
- file: manage_cidata_sool9
|
||||||
|
# Note: When VM doesn't exist, you'll see "error: failed to get domain 'sool9'" - this is expected
|
||||||
|
# [ERROR ] Command 'virsh' failed with return code: 1
|
||||||
|
# [ERROR ] stdout: error: failed to get domain 'sool9'
|
||||||
|
- onlyif:
|
||||||
|
- virsh dominfo sool9
|
||||||
|
|
||||||
|
# Create and start the VM, letting cloud-init run
|
||||||
|
create_vm_sool9:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
virt-install --name sool9 \
|
||||||
|
--memory 12288 --vcpus 8 --cpu host-model \
|
||||||
|
--disk /nsm/libvirt/images/sool9/sool9.qcow2,format=qcow2,bus=virtio \
|
||||||
|
--disk /nsm/libvirt/images/sool9/sool9-cidata.iso,device=cdrom \
|
||||||
|
--network bridge=br0,model=virtio \
|
||||||
|
--os-variant=ol9.5 \
|
||||||
|
--import \
|
||||||
|
--noautoconsole
|
||||||
|
- require:
|
||||||
|
- cmd: start_storage_pool_sool9
|
||||||
|
- pkg: install_virt-install
|
||||||
|
- onchanges:
|
||||||
|
- file: manage_qcow2_sool9
|
||||||
|
- file: manage_metadata_sool9
|
||||||
|
- file: manage_userdata_sool9
|
||||||
|
- file: manage_cidata_sool9
|
||||||
|
|
||||||
|
# Wait for cloud-init to complete and VM to shutdown
|
||||||
|
wait_for_cloud_init_sool9:
|
||||||
|
cmd.run:
|
||||||
|
- name: /usr/sbin/so-wait-cloud-init -n sool9
|
||||||
|
- require:
|
||||||
|
- cmd: create_vm_sool9
|
||||||
|
- onchanges:
|
||||||
|
- cmd: create_vm_sool9
|
||||||
|
- timeout: 600
|
||||||
|
|
||||||
|
# Configure network predictability after cloud-init
|
||||||
|
configure_network_predictable_sool9:
|
||||||
|
cmd.run:
|
||||||
|
- name: /usr/sbin/so-qcow2-network-predictable -n sool9
|
||||||
|
- require:
|
||||||
|
- cmd: wait_for_cloud_init_sool9
|
||||||
|
- onchanges:
|
||||||
|
- cmd: create_vm_sool9
|
||||||
|
|
||||||
|
# Fire event here that causes soc.dyanno.hypervisor state to be applied
|
||||||
|
base_domain_ready:
|
||||||
|
event.send:
|
||||||
|
- name: soc/dyanno/hypervisor/baseDomain
|
||||||
|
- data:
|
||||||
|
status: 'Initialized'
|
||||||
|
- require:
|
||||||
|
- cmd: configure_network_predictable_sool9
|
||||||
|
- onchanges:
|
||||||
|
- cmd: create_vm_sool9
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
1
salt/libvirt/images/sool9/README
Normal file
1
salt/libvirt/images/sool9/README
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# The files in this directory (/opt/so/saltstack/local/salt/libvirt/images/sool9) are generated by the setup_hypervisor runner. They are then distributed to the hypervisors where a storage pool will be created then the image can be installed.
|
||||||
112
salt/libvirt/init.sls
Normal file
112
salt/libvirt/init.sls
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
{% from 'libvirt/map.jinja' import LIBVIRTMERGED %}
|
||||||
|
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- libvirt.64962
|
||||||
|
- libvirt.packages
|
||||||
|
- libvirt.ssh.users
|
||||||
|
|
||||||
|
install_libvirt:
|
||||||
|
pkg.installed:
|
||||||
|
- name: libvirt
|
||||||
|
|
||||||
|
libvirt_conf_dir:
|
||||||
|
file.directory:
|
||||||
|
- name: /opt/so/conf/libvirt
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
libvirt_config:
|
||||||
|
file.managed:
|
||||||
|
- name: /opt/so/conf/libvirt/libvirtd.conf
|
||||||
|
- source: salt://libvirt/etc/libvirtd.conf
|
||||||
|
# - source: salt://libvirt/etc/libvirtd.conf.jinja
|
||||||
|
# - template: jinja
|
||||||
|
# - defaults:
|
||||||
|
# LIBVIRTMERGED: {{ LIBVIRTMERGED }}
|
||||||
|
|
||||||
|
# since the libvirtd service looks for the config at /etc/libvirt/libvirtd.conf, and we dont want to manage the service looking in a new location, create this symlink to the managed config
|
||||||
|
config_symlink:
|
||||||
|
file.symlink:
|
||||||
|
- name: /etc/libvirt/libvirtd.conf
|
||||||
|
- target: /opt/so/conf/libvirt/libvirtd.conf
|
||||||
|
- force: True
|
||||||
|
- user: qemu
|
||||||
|
- group: qemu
|
||||||
|
|
||||||
|
libvirt_service:
|
||||||
|
service.running:
|
||||||
|
- name: libvirtd
|
||||||
|
- enable: True
|
||||||
|
- watch:
|
||||||
|
- file: libvirt_config
|
||||||
|
|
||||||
|
# places cacert, clientcert, clientkey, servercert and serverkey
|
||||||
|
# /etc/pki/CA/cacert.pem
|
||||||
|
# /etc/pki/libvirt/clientcert.pem and /etc/pki/libvirt/servercert.pem
|
||||||
|
# /etc/pki/libvirt/private/clientkey.pem and /etc/pki/libvirt/private/serverkey.pem
|
||||||
|
libvirt_keys:
|
||||||
|
virt.keys:
|
||||||
|
- name: libvirt_keys
|
||||||
|
|
||||||
|
install_qemu:
|
||||||
|
pkg.installed:
|
||||||
|
- name: qemu-kvm
|
||||||
|
|
||||||
|
create_host_bridge:
|
||||||
|
virt.network_running:
|
||||||
|
- name: host-bridge
|
||||||
|
- bridge: br0
|
||||||
|
- forward: bridge
|
||||||
|
- autostart: True
|
||||||
|
|
||||||
|
# Disable the default storage pool to avoid conflicts
|
||||||
|
disable_default_pool:
|
||||||
|
cmd.run:
|
||||||
|
- name: virsh pool-destroy default && virsh pool-autostart default --disable
|
||||||
|
- onlyif: virsh pool-list | grep default
|
||||||
|
- require:
|
||||||
|
- pkg: install_libvirt-client
|
||||||
|
- service: libvirt_service
|
||||||
|
|
||||||
|
disable_default_bridge:
|
||||||
|
cmd.run:
|
||||||
|
- name: virsh net-destroy default && virsh net-autostart default --disable
|
||||||
|
- require:
|
||||||
|
- pkg: install_libvirt-client
|
||||||
|
- service: libvirt_service
|
||||||
|
- onlyif:
|
||||||
|
- virsh net-list | grep default
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
7
salt/libvirt/map.jinja
Normal file
7
salt/libvirt/map.jinja
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% import_yaml 'libvirt/defaults.yaml' as LIBVIRTDEFAULTS %}
|
||||||
|
{% set LIBVIRTMERGED = salt['pillar.get']('libvirt', LIBVIRTDEFAULTS.libvirt, merge=True) %}
|
||||||
84
salt/libvirt/packages.sls
Normal file
84
salt/libvirt/packages.sls
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
# allows for creating vm images
|
||||||
|
# any node manipulating images needs this
|
||||||
|
# used on manager for setup_hypervisor runner
|
||||||
|
install_qemu-img:
|
||||||
|
pkg.installed:
|
||||||
|
- name: qemu-img
|
||||||
|
|
||||||
|
# used on manager for setup_hypervisor runner
|
||||||
|
install_xorriso:
|
||||||
|
pkg.installed:
|
||||||
|
- name: xorriso
|
||||||
|
|
||||||
|
install_libvirt-libs:
|
||||||
|
pkg.installed:
|
||||||
|
- name: libvirt-libs
|
||||||
|
|
||||||
|
libvirt_python_wheel:
|
||||||
|
file.recurse:
|
||||||
|
- name: /opt/so/conf/libvirt/source-packages/libvirt-python
|
||||||
|
- source: salt://libvirt/source-packages/libvirt-python
|
||||||
|
- makedirs: True
|
||||||
|
- clean: True
|
||||||
|
|
||||||
|
libvirt_python_module:
|
||||||
|
cmd.run:
|
||||||
|
- name: /opt/saltstack/salt/bin/python3 -m pip install --no-index --find-links=/opt/so/conf/libvirt/source-packages/libvirt-python libvirt-python
|
||||||
|
- onchanges:
|
||||||
|
- file: libvirt_python_wheel
|
||||||
|
|
||||||
|
{% if 'hype' in grains.id.split('_') | last %}
|
||||||
|
|
||||||
|
# provides virsh
|
||||||
|
install_libvirt-client:
|
||||||
|
pkg.installed:
|
||||||
|
- name: libvirt-client
|
||||||
|
|
||||||
|
install_guestfs-tools:
|
||||||
|
pkg.installed:
|
||||||
|
- name: guestfs-tools
|
||||||
|
|
||||||
|
install_virt-install:
|
||||||
|
pkg.installed:
|
||||||
|
- name: virt-install
|
||||||
|
|
||||||
|
# needed for for so-qcow2-modify-network - import guestfs
|
||||||
|
install_python3-libguestfs:
|
||||||
|
pkg.installed:
|
||||||
|
- name: python3-libguestfs
|
||||||
|
###
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
Binary file not shown.
2
salt/libvirt/ssh/files/config
Normal file
2
salt/libvirt/ssh/files/config
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
Host *
|
||||||
|
IdentityFile /etc/ssh/auth_keys/soqemussh/id_ed25519
|
||||||
62
salt/libvirt/ssh/users.sls
Normal file
62
salt/libvirt/ssh/users.sls
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
|
{% if GLOBALS.is_manager %}
|
||||||
|
|
||||||
|
qemu_ssh_client_config:
|
||||||
|
file.managed:
|
||||||
|
- name: /root/.ssh/config
|
||||||
|
- source: salt://libvirt/ssh/files/config
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] %}
|
||||||
|
|
||||||
|
# used for qemu+ssh connection between manager and hypervisors
|
||||||
|
create_soqemussh_user:
|
||||||
|
user.present:
|
||||||
|
- name: soqemussh
|
||||||
|
- shell: /bin/bash
|
||||||
|
- home: /home/soqemussh
|
||||||
|
- groups:
|
||||||
|
- wheel
|
||||||
|
- qemu
|
||||||
|
- libvirt
|
||||||
|
|
||||||
|
soqemussh_pub_key:
|
||||||
|
ssh_auth.present:
|
||||||
|
- user: soqemussh
|
||||||
|
- source: salt://libvirt/ssh/keys/id_ed25519.pub
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
@@ -12,6 +12,8 @@ logstash:
|
|||||||
- search
|
- search
|
||||||
manager:
|
manager:
|
||||||
- manager
|
- manager
|
||||||
|
managerhype:
|
||||||
|
- manager
|
||||||
managersearch:
|
managersearch:
|
||||||
- manager
|
- manager
|
||||||
- search
|
- search
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ include:
|
|||||||
- elasticsearch.ca
|
- elasticsearch.ca
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{# Kafka ca runs on nodes that can run logstash for Kafka input / output. Only when Kafka is global pipeline #}
|
{# Kafka ca runs on nodes that can run logstash for Kafka input / output. Only when Kafka is global pipeline #}
|
||||||
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
|
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managerhype', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
|
||||||
- kafka.ca
|
- kafka.ca
|
||||||
- kafka.ssl
|
- kafka.ssl
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -65,26 +65,26 @@ so-logstash:
|
|||||||
- /opt/so/log/logstash:/var/log/logstash:rw
|
- /opt/so/log/logstash:/var/log/logstash:rw
|
||||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||||
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
|
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
|
||||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
||||||
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
|
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
|
||||||
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
|
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet', 'so-heavynode', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
|
||||||
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
|
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
|
||||||
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
|
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
|
||||||
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
|
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
|
||||||
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
|
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
||||||
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
|
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
|
||||||
{% else %}
|
{% else %}
|
||||||
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
|
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
|
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
|
||||||
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
|
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
|
||||||
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
|
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||||
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
|
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
|
||||||
- /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
|
- /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -100,7 +100,7 @@ so-logstash:
|
|||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
- watch:
|
- watch:
|
||||||
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-fleet', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
|
||||||
- x509: etc_elasticfleet_logstash_key
|
- x509: etc_elasticfleet_logstash_key
|
||||||
- x509: etc_elasticfleet_logstash_crt
|
- x509: etc_elasticfleet_logstash_crt
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -111,23 +111,23 @@ so-logstash:
|
|||||||
- file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
|
- file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||||
- file: kafkacertz
|
- file: kafkacertz
|
||||||
{% endif %}
|
{% endif %}
|
||||||
- require:
|
- require:
|
||||||
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
||||||
- x509: etc_filebeat_crt
|
- x509: etc_filebeat_crt
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
||||||
- x509: pki_public_ca_crt
|
- x509: pki_public_ca_crt
|
||||||
{% else %}
|
{% else %}
|
||||||
- x509: trusttheca
|
- x509: trusttheca
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
|
||||||
- file: cacertz
|
- file: cacertz
|
||||||
- file: capemz
|
- file: capemz
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||||
- file: kafkacertz
|
- file: kafkacertz
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
|||||||
64
salt/manager/hypervisor.sls
Normal file
64
salt/manager/hypervisor.sls
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
{% set manager_hostname = grains.id.split('_')[0] %}
|
||||||
|
|
||||||
|
# Check if hypervisor environment has been set up
|
||||||
|
{% set ssh_user_exists = salt['user.info']('soqemussh') %}
|
||||||
|
{% set ssh_keys_exist = salt['file.file_exists']('/etc/ssh/auth_keys/soqemussh/id_ed25519') and
|
||||||
|
salt['file.file_exists']('/etc/ssh/auth_keys/soqemussh/id_ed25519.pub') and
|
||||||
|
salt['file.file_exists']('/opt/so/saltstack/local/salt/libvirt/ssh/keys/id_ed25519.pub') %}
|
||||||
|
{% set base_image_exists = salt['file.file_exists']('/nsm/libvirt/boot/OL9U5_x86_64-kvm-b253.qcow2') %}
|
||||||
|
{% set vm_files_exist = salt['file.directory_exists']('/opt/so/saltstack/local/salt/libvirt/images/sool9') and
|
||||||
|
salt['file.file_exists']('/opt/so/saltstack/local/salt/libvirt/images/sool9/sool9.qcow2') and
|
||||||
|
salt['file.file_exists']('/opt/so/saltstack/local/salt/libvirt/images/sool9/sool9-cidata.iso') %}
|
||||||
|
{% set hypervisor_host_dir_exists = salt['file.directory_exists']('/opt/so/saltstack/local/salt/hypervisor/hosts/' ~ manager_hostname) %}
|
||||||
|
|
||||||
|
{% if ssh_user_exists and ssh_keys_exist and base_image_exists and vm_files_exist and hypervisor_host_dir_exists %}
|
||||||
|
# Hypervisor environment is already set up, include the necessary states
|
||||||
|
include:
|
||||||
|
- hypervisor
|
||||||
|
- libvirt
|
||||||
|
- libvirt.images
|
||||||
|
|
||||||
|
hypervisor_setup_verified:
|
||||||
|
test.succeed_without_changes:
|
||||||
|
- name: Hypervisor environment is already set up
|
||||||
|
- comment: All required files and configurations for the hypervisor environment exist
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
# Hypervisor environment needs to be set up
|
||||||
|
run_setup_hypervisor:
|
||||||
|
salt.runner:
|
||||||
|
- name: setup_hypervisor.setup_environment
|
||||||
|
- minion_id: {{ grains.id }}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
@@ -10,27 +10,44 @@ import subprocess
|
|||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
import yaml
|
import yaml
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# Configure logging to both file and console
|
||||||
|
logger = logging.getLogger('so-firewall')
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
# File handler
|
||||||
|
file_handler = logging.FileHandler('/opt/so/log/so-firewall.log')
|
||||||
|
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
|
||||||
|
logger.addHandler(file_handler)
|
||||||
|
|
||||||
|
# Console handler
|
||||||
|
console_handler = logging.StreamHandler()
|
||||||
|
console_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
|
||||||
|
logger.addHandler(console_handler)
|
||||||
|
|
||||||
lockFile = "/tmp/so-firewall.lock"
|
lockFile = "/tmp/so-firewall.lock"
|
||||||
hostgroupsFilename = "/opt/so/saltstack/local/pillar/firewall/soc_firewall.sls"
|
hostgroupsFilename = "/opt/so/saltstack/local/pillar/firewall/soc_firewall.sls"
|
||||||
defaultsFilename = "/opt/so/saltstack/default/salt/firewall/defaults.yaml"
|
defaultsFilename = "/opt/so/saltstack/default/salt/firewall/defaults.yaml"
|
||||||
|
|
||||||
def showUsage(options, args):
|
def showUsage(options, args):
|
||||||
print('Usage: {} [OPTIONS] <COMMAND> [ARGS...]'.format(sys.argv[0]))
|
usage = f'''Usage: {sys.argv[0]} [OPTIONS] <COMMAND> [ARGS...]
|
||||||
print(' Options:')
|
Options:
|
||||||
print(' --apply - After updating the firewall configuration files, apply the new firewall state')
|
--apply - After updating the firewall configuration files, apply the new firewall state with queue=True
|
||||||
print('')
|
|
||||||
print(' General commands:')
|
General commands:
|
||||||
print(' help - Prints this usage information.')
|
help - Prints this usage information.
|
||||||
print(' apply - Apply the firewall state.')
|
apply - Apply the firewall state.
|
||||||
print('')
|
|
||||||
print(' Host commands:')
|
Host commands:
|
||||||
print(' includehost - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>')
|
includehost - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>
|
||||||
print(' addhostgroup - Adds a new, custom host group. Args: <GROUP_NAME>')
|
removehost - Removes the given IP from all hostgroups. Args: <IP>
|
||||||
print('')
|
addhostgroup - Adds a new, custom host group. Args: <GROUP_NAME>
|
||||||
print(' Where:')
|
|
||||||
print(' GROUP_NAME - The name of an alias group (Ex: analyst)')
|
Where:
|
||||||
print(' IP - Either a single IP address (Ex: 8.8.8.8) or a CIDR block (Ex: 10.23.0.0/16).')
|
GROUP_NAME - The name of an alias group (Ex: analyst)
|
||||||
|
IP - Either a single IP address (Ex: 8.8.8.8) or a CIDR block (Ex: 10.23.0.0/16).'''
|
||||||
|
logger.error(usage)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
def checkApplyOption(options):
|
def checkApplyOption(options):
|
||||||
@@ -61,7 +78,7 @@ def addIp(name, ip):
|
|||||||
else:
|
else:
|
||||||
hostgroup = content['firewall']['hostgroups'][name]
|
hostgroup = content['firewall']['hostgroups'][name]
|
||||||
else:
|
else:
|
||||||
print('Host group not defined in salt/firewall/defaults.yaml or hostgroup name is unallowed.', file=sys.stderr)
|
logger.error(f"Host group {name} not defined in defaults or is unallowed")
|
||||||
return 4
|
return 4
|
||||||
ips = hostgroup
|
ips = hostgroup
|
||||||
if ips is None:
|
if ips is None:
|
||||||
@@ -69,15 +86,16 @@ def addIp(name, ip):
|
|||||||
hostgroup = ips
|
hostgroup = ips
|
||||||
if ip not in ips:
|
if ip not in ips:
|
||||||
ips.append(ip)
|
ips.append(ip)
|
||||||
else:
|
|
||||||
print('Already exists', file=sys.stderr)
|
|
||||||
return 3
|
|
||||||
writeYaml(hostgroupsFilename, content)
|
writeYaml(hostgroupsFilename, content)
|
||||||
|
logger.info(f"Successfully added IP {ip} to hostgroup {name}")
|
||||||
|
else:
|
||||||
|
logger.warning(f"IP {ip} already exists in hostgroup {name}")
|
||||||
|
return 3
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def includehost(options, args):
|
def includehost(options, args):
|
||||||
if len(args) != 2:
|
if len(args) != 2:
|
||||||
print('Missing host group name or ip argument', file=sys.stderr)
|
logger.error('Missing host group name or ip argument')
|
||||||
showUsage(options, args)
|
showUsage(options, args)
|
||||||
result = addIp(args[0], args[1])
|
result = addIp(args[0], args[1])
|
||||||
code = result
|
code = result
|
||||||
@@ -86,9 +104,45 @@ def includehost(options, args):
|
|||||||
return code
|
return code
|
||||||
|
|
||||||
def apply(options, args):
|
def apply(options, args):
|
||||||
proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
|
logger.info("Applying firewall configuration changes")
|
||||||
|
salt_args = ['salt-call', 'state.apply', 'firewall', 'queue=True']
|
||||||
|
proc = subprocess.run(salt_args)
|
||||||
|
if proc.returncode != 0:
|
||||||
|
logger.error("Failed to apply firewall changes")
|
||||||
|
else:
|
||||||
|
logger.info("Successfully applied firewall changes")
|
||||||
return proc.returncode
|
return proc.returncode
|
||||||
|
|
||||||
|
def removehost(options, args):
|
||||||
|
"""Remove an IP from all hostgroups and apply changes if requested"""
|
||||||
|
if len(args) != 1:
|
||||||
|
logger.error('Missing IP argument')
|
||||||
|
showUsage(options, args)
|
||||||
|
|
||||||
|
ip = args[0]
|
||||||
|
content = loadYaml(hostgroupsFilename)
|
||||||
|
if not content or 'firewall' not in content or 'hostgroups' not in content['firewall']:
|
||||||
|
logger.error("Invalid firewall configuration structure")
|
||||||
|
return 4
|
||||||
|
|
||||||
|
modified = False
|
||||||
|
removed_from = []
|
||||||
|
for group_name, ips in content['firewall']['hostgroups'].items():
|
||||||
|
if ips and ip in ips:
|
||||||
|
ips.remove(ip)
|
||||||
|
modified = True
|
||||||
|
removed_from.append(group_name)
|
||||||
|
|
||||||
|
if modified:
|
||||||
|
writeYaml(hostgroupsFilename, content)
|
||||||
|
logger.info(f"Successfully removed IP {ip} from hostgroups: {', '.join(removed_from)}")
|
||||||
|
if "--apply" in options:
|
||||||
|
return apply(None, None)
|
||||||
|
else:
|
||||||
|
logger.error(f"IP {ip} not found in any hostgroups")
|
||||||
|
|
||||||
|
return 0
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
options = []
|
options = []
|
||||||
args = sys.argv[1:]
|
args = sys.argv[1:]
|
||||||
@@ -103,6 +157,7 @@ def main():
|
|||||||
commands = {
|
commands = {
|
||||||
"help": showUsage,
|
"help": showUsage,
|
||||||
"includehost": includehost,
|
"includehost": includehost,
|
||||||
|
"removehost": removehost,
|
||||||
"apply": apply
|
"apply": apply
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -121,7 +176,7 @@ def main():
|
|||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
|
|
||||||
if lockAttempts == maxAttempts:
|
if lockAttempts == maxAttempts:
|
||||||
print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")
|
logger.error(f"Lock file ({lockFile}) could not be created - proceeding without lock")
|
||||||
|
|
||||||
cmd = commands.get(args[0], showUsage)
|
cmd = commands.get(args[0], showUsage)
|
||||||
code = cmd(options, args[1:])
|
code = cmd(options, args[1:])
|
||||||
@@ -129,7 +184,7 @@ def main():
|
|||||||
try:
|
try:
|
||||||
os.remove(lockFile)
|
os.remove(lockFile)
|
||||||
except:
|
except:
|
||||||
print("Lock file (" + lockFile + ") already removed")
|
logger.error(f"Lock file ({lockFile}) already removed")
|
||||||
|
|
||||||
sys.exit(code)
|
sys.exit(code)
|
||||||
|
|
||||||
|
|||||||
@@ -51,6 +51,10 @@ fi
|
|||||||
'MANAGER')
|
'MANAGER')
|
||||||
so-firewall includehost manager "$IP"
|
so-firewall includehost manager "$IP"
|
||||||
;;
|
;;
|
||||||
|
'MANAGERHYPE')
|
||||||
|
so-firewall includehost manager "$IP"
|
||||||
|
so-firewall includehost hypervisor "$IP" --apply
|
||||||
|
;;
|
||||||
'MANAGERSEARCH')
|
'MANAGERSEARCH')
|
||||||
so-firewall includehost manager "$IP"
|
so-firewall includehost manager "$IP"
|
||||||
so-firewall includehost searchnode "$IP" --apply
|
so-firewall includehost searchnode "$IP" --apply
|
||||||
@@ -82,4 +86,7 @@ fi
|
|||||||
'DESKTOP')
|
'DESKTOP')
|
||||||
so-firewall includehost desktop "$IP" --apply
|
so-firewall includehost desktop "$IP" --apply
|
||||||
;;
|
;;
|
||||||
|
'HYPERVISOR')
|
||||||
|
so-firewall includehost hypervisor "$IP" --apply
|
||||||
|
;;
|
||||||
esac
|
esac
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -837,7 +837,15 @@ up_to_2.4.160() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
up_to_2.4.170() {
|
up_to_2.4.170() {
|
||||||
echo "Nothing to do for 2.4.170"
|
echo "Creating pillar files for virtualization feature"
|
||||||
|
|
||||||
|
states=("hypervisor" "vm" "libvirt")
|
||||||
|
|
||||||
|
# Create pillar files for each state
|
||||||
|
for state in "${states[@]}"; do
|
||||||
|
mkdir -p /opt/so/saltstack/local/pillar/$state
|
||||||
|
touch /opt/so/saltstack/local/pillar/$state/adv_$state.sls /opt/so/saltstack/local/pillar/$state/soc_$state.sls
|
||||||
|
done
|
||||||
|
|
||||||
INSTALLEDVERSION=2.4.170
|
INSTALLEDVERSION=2.4.170
|
||||||
}
|
}
|
||||||
|
|||||||
665
salt/manager/tools/sbin_jinja/so-salt-cloud
Normal file
665
salt/manager/tools/sbin_jinja/so-salt-cloud
Normal file
@@ -0,0 +1,665 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for automated virtual machine provisioning and configuration in Security Onion's virtualization infrastructure.
|
||||||
|
This script integrates multiple components to provide a streamlined VM deployment process:
|
||||||
|
|
||||||
|
1. Salt Cloud Integration:
|
||||||
|
- Works with libvirt salt-cloud provider for VM creation
|
||||||
|
- Manages VM lifecycle from provisioning through configuration
|
||||||
|
- Handles profile-based deployment for consistent VM setups
|
||||||
|
|
||||||
|
2. Network Configuration Management:
|
||||||
|
- Supports both DHCP and static IPv4 networking
|
||||||
|
- Pre-configures network settings before VM deployment
|
||||||
|
- Integrates with qcow2.modify_network_config for image modification
|
||||||
|
- Ensures VMs boot with correct network configuration
|
||||||
|
|
||||||
|
3. Hardware Resource Management:
|
||||||
|
- Flexible CPU and memory allocation
|
||||||
|
- Advanced PCI device passthrough capabilities
|
||||||
|
- Controlled VM startup sequence
|
||||||
|
- Uses qcow2.modify_hardware_config for hardware settings
|
||||||
|
|
||||||
|
4. Security Integration:
|
||||||
|
- Automatic firewall rule configuration
|
||||||
|
- Directly integrates with so-firewall for consistent VM management
|
||||||
|
- Configures role-based firewall rules for new VMs
|
||||||
|
- Uses same firewall integration approach for both adding and removing VMs
|
||||||
|
|
||||||
|
This script serves as the primary interface for VM deployment in Security Onion, coordinating
|
||||||
|
between salt-cloud, network configuration, hardware management, and security components to
|
||||||
|
ensure proper VM provisioning and configuration.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Create a VM:
|
||||||
|
so-salt-cloud -p <profile> <vm_name> (--dhcp4 | --static4 --ip4 <ip_address> --gw4 <gateway>)
|
||||||
|
[-c <cpu_count>] [-m <memory_amount>] [-P <pci_id>] [-P <pci_id> ...] [--dns4 <dns_servers>] [--search4 <search_domain>]
|
||||||
|
|
||||||
|
# Delete a VM:
|
||||||
|
so-salt-cloud -p <profile> <vm_name> -d [-y]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
-p, --profile The cloud profile to build the VM from.
|
||||||
|
<vm_name> The name of the VM.
|
||||||
|
-d, --destroy Delete the specified VM.
|
||||||
|
-y, --assume-yes Default yes in answer to all confirmation questions.
|
||||||
|
|
||||||
|
Network Configuration (required for VM creation):
|
||||||
|
--dhcp4 Configure interface for DHCP (IPv4).
|
||||||
|
--static4 Configure interface for static IPv4 settings.
|
||||||
|
--ip4 IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.
|
||||||
|
--gw4 IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.
|
||||||
|
--dns4 Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).
|
||||||
|
--search4 DNS search domain for IPv4.
|
||||||
|
|
||||||
|
Hardware Configuration (optional):
|
||||||
|
-c, --cpu Number of virtual CPUs to assign.
|
||||||
|
-m, --memory Amount of memory to assign in MiB.
|
||||||
|
-P, --pci PCI hardware ID(s) to passthrough to the VM (e.g., 0000:c7:00.0). Can be specified multiple times.
|
||||||
|
Format: domain:bus:device.function
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
1. Static IP Configuration with Multiple PCI Devices:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 \
|
||||||
|
--dns4 192.168.1.1,192.168.1.2 --search4 example.local -c 4 -m 8192 -P 0000:c7:00.0 -P 0000:c4:00.0
|
||||||
|
|
||||||
|
This command provisions a VM named vm1_sensor using the sool9-hyper1 profile with the following settings:
|
||||||
|
|
||||||
|
- Static IPv4 configuration:
|
||||||
|
- IP Address: 192.168.1.10/24
|
||||||
|
- Gateway: 192.168.1.1
|
||||||
|
- DNS Servers: 192.168.1.1, 192.168.1.2
|
||||||
|
- DNS Search Domain: example.local
|
||||||
|
- Hardware Configuration:
|
||||||
|
- CPUs: 4
|
||||||
|
- Memory: 8192 MiB
|
||||||
|
- PCI Device Passthrough: 0000:c7:00.0, 0000:c4:00.0
|
||||||
|
|
||||||
|
2. DHCP Configuration with Default Hardware Settings:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm2_master --dhcp4
|
||||||
|
|
||||||
|
This command provisions a VM named vm2_master using the sool9-hyper1 profile with DHCP for network configuration and default hardware settings.
|
||||||
|
|
||||||
|
3. Static IP Configuration without Hardware Specifications:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm3_search --static4 --ip4 192.168.1.20/24 --gw4 192.168.1.1
|
||||||
|
|
||||||
|
This command provisions a VM named vm3_search with a static IP configuration and default hardware settings.
|
||||||
|
|
||||||
|
4. DHCP Configuration with Custom Hardware Specifications and Multiple PCI Devices:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm4_node --dhcp4 -c 8 -m 16384 -P 0000:c7:00.0 -P 0000:c4:00.0 -P 0000:c4:00.1
|
||||||
|
|
||||||
|
This command provisions a VM named vm4_node using DHCP for network configuration and custom hardware settings:
|
||||||
|
|
||||||
|
- CPUs: 8
|
||||||
|
- Memory: 16384 MiB
|
||||||
|
- PCI Device Passthrough: 0000:c7:00.0, 0000:c4:00.0, 0000:c4:00.1
|
||||||
|
|
||||||
|
5. Static IP Configuration with DNS and Search Domain:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 --dns4 192.168.1.1 --search4 example.local
|
||||||
|
|
||||||
|
This command provisions a VM named vm1_sensor using the sool9-hyper1 profile with static IPv4 configuration:
|
||||||
|
|
||||||
|
- Static IPv4 configuration:
|
||||||
|
- IP Address: 192.168.1.10/24
|
||||||
|
- Gateway: 192.168.1.1
|
||||||
|
- DNS Server: 192.168.1.1
|
||||||
|
- DNS Search Domain: example.local
|
||||||
|
|
||||||
|
6. Delete a VM with Confirmation:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm1_sensor -d
|
||||||
|
|
||||||
|
This command deletes the VM named vm1_sensor and will prompt for confirmation before proceeding.
|
||||||
|
|
||||||
|
7. Delete a VM without Confirmation:
|
||||||
|
|
||||||
|
Command:
|
||||||
|
so-salt-cloud -p sool9-hyper1 vm1_sensor -yd
|
||||||
|
|
||||||
|
This command deletes the VM named vm1_sensor without prompting for confirmation.
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- When using --static4, both --ip4 and --gw4 options are required.
|
||||||
|
- The script assumes the cloud profile name follows the format basedomain-hypervisorname.
|
||||||
|
- Hardware parameters (-c, -m, -P) are optional. If not provided, default values from the profile will be used.
|
||||||
|
- The -P or --pci option can be specified multiple times to pass through multiple PCI devices to the VM.
|
||||||
|
- The vm_name should include the role of the VM after an underscore (e.g., hostname_role), as the script uses this to determine the VM's role for firewall configuration.
|
||||||
|
- PCI hardware IDs must be in the format domain:bus:device.function (e.g., 0000:c7:00.0).
|
||||||
|
|
||||||
|
Description:
|
||||||
|
|
||||||
|
The so-salt-cloud script automates the provisioning and configuration of virtual machines in Security Onion's infrastructure. It orchestrates multiple components to ensure proper VM setup and security configuration. The script executes in the following phases:
|
||||||
|
|
||||||
|
1. Network Configuration Phase:
|
||||||
|
- Pre-deployment network setup using qcow2.modify_network_config
|
||||||
|
- Supports both DHCP and static IPv4 configurations
|
||||||
|
- Modifies the base QCOW2 image directly to ensure network settings persist
|
||||||
|
- Handles DNS and search domain configuration for proper name resolution
|
||||||
|
- Validates network parameters before modification
|
||||||
|
- Ensures network settings are in place before VM creation
|
||||||
|
|
||||||
|
2. VM Provisioning Phase:
|
||||||
|
- Leverages salt-cloud for consistent VM deployment
|
||||||
|
- Uses predefined profiles for standardized configurations
|
||||||
|
- Manages the VM lifecycle through libvirt
|
||||||
|
- Prevents automatic VM start to allow hardware configuration
|
||||||
|
- Validates profile and VM name format
|
||||||
|
- Extracts role information from VM name for security configuration
|
||||||
|
|
||||||
|
3. Hardware Configuration Phase:
|
||||||
|
- Configures VM hardware through qcow2.modify_hardware_config
|
||||||
|
- Manages CPU allocation based on host capabilities
|
||||||
|
- Handles memory assignment in MiB units
|
||||||
|
- Supports multiple PCI device passthrough for advanced networking
|
||||||
|
- Validates hardware parameters against host resources
|
||||||
|
- Controls VM startup sequence after configuration
|
||||||
|
|
||||||
|
4. Security Integration Phase:
|
||||||
|
- Monitors salt-cloud output for VM IP address assignment
|
||||||
|
- Extracts role information from VM name
|
||||||
|
- Calls so-firewall directly to configure firewall rules
|
||||||
|
- Configures role-based firewall rules automatically
|
||||||
|
- Ensures security policies are in place for VM access
|
||||||
|
- Logs all security-related operations for audit purposes
|
||||||
|
|
||||||
|
The script implements extensive error handling and logging throughout each phase:
|
||||||
|
- Validates all input parameters before execution
|
||||||
|
- Provides detailed error messages for troubleshooting
|
||||||
|
- Logs operations to both file and console
|
||||||
|
- Handles process interruption gracefully
|
||||||
|
- Ensures atomic operations where possible
|
||||||
|
- Maintains audit trail of all configuration changes
|
||||||
|
|
||||||
|
Integration points:
|
||||||
|
- Works with Security Onion's salt-cloud provider
|
||||||
|
- Interfaces with qcow2 module for image and hardware management
|
||||||
|
- Directly integrates with so-firewall for security configuration
|
||||||
|
- Uses libvirt for VM management
|
||||||
|
- Leverages SaltStack for distributed execution
|
||||||
|
|
||||||
|
Exit Codes:
|
||||||
|
|
||||||
|
- 0: Success
|
||||||
|
- Non-zero: An error occurred during execution.
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
|
||||||
|
- Logs are written to /opt/so/log/salt/so-salt-cloud.log.
|
||||||
|
- Both file and console logging are enabled for real-time monitoring.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
import salt.client
|
||||||
|
import logging
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
# Initialize Salt local client
# Used by the run_qcow2_* helpers to execute qcow2.* module functions on
# the hypervisor minions.
local = salt.client.LocalClient()

# Set up logging
# A dedicated logger writes to both the so-salt-cloud log file and the
# console so operators can follow progress in real time.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

file_handler = logging.FileHandler('/opt/so/log/salt/so-salt-cloud.log')
console_handler = logging.StreamHandler()

# Same timestamped format for both destinations.
formatter = logging.Formatter('%(asctime)s %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)

logger.addHandler(file_handler)
logger.addHandler(console_handler)
|
||||||
|
|
||||||
|
def add_host_to_firewall(ip, role):
    """Configure firewall rules for a newly created VM.

    Args:
        ip (str): The IP address of the VM to add to the firewall.
        role (str): The role of the VM (e.g., 'sensor', 'manager'); used
            (lowercased) as the so-firewall host group name.

    Calls so-firewall directly with --apply so the rule takes effect
    immediately, maintaining consistency with how firewall rules are
    removed during VM deletion.
    """
    try:
        # includehost adds the IP to the role-named host group; --apply
        # pushes the updated ruleset right away.
        process = subprocess.Popen(
            ['/usr/sbin/so-firewall', 'includehost', role.lower(), ip, '--apply'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True
        )

        # Stream so-firewall output into our log line by line.
        for line in iter(process.stdout.readline, ''):
            if line:
                logger.info(line.rstrip('\n'))

        process.stdout.close()
        process.wait()

        # Log the outcome so failures are visible in the audit log; the
        # original version silently ignored the exit code, unlike
        # cleanup_deleted_vm() which reports success/failure.
        if process.returncode == 0:
            logger.info(f"Successfully added IP {ip} to firewall role {role.lower()}")
        else:
            logger.error(f"so-firewall exited with code {process.returncode} while adding IP {ip}")

    except Exception as e:
        logger.error(f"An error occurred while adding host to firewall: {e}")
|
||||||
|
|
||||||
|
def get_vm_ip(vm_name):
    """Return a VM's IP address as recorded in its minion pillar file.

    Args:
        vm_name (str): The minion/VM name whose pillar file is read.

    Raises:
        Exception: if the pillar file is missing or lacks host:mainip.
    """
    try:
        # Each minion has a pillar file holding its primary IP under host:mainip.
        pillar_path = f"/opt/so/saltstack/local/pillar/minions/{vm_name}.sls"
        with open(pillar_path, 'r') as pillar_fh:
            minion_pillar = yaml.safe_load(pillar_fh)

        if minion_pillar and 'host' in minion_pillar and 'mainip' in minion_pillar['host']:
            return minion_pillar['host']['mainip']
        raise Exception(f"Could not find mainip in pillar file {pillar_path}")
    except FileNotFoundError:
        # Re-raise with a clearer message for the caller.
        raise Exception(f"Pillar file not found: {pillar_path}")
    except Exception as e:
        logger.error(f"Failed to get IP for VM {vm_name}: {e}")
        raise
|
||||||
|
|
||||||
|
def cleanup_deleted_vm(ip, role):
    """Handle cleanup tasks when a VM is deleted.

    Args:
        ip (str): IP address of the deleted VM to remove from the firewall.
        role (str): Role of the VM.
            NOTE(review): accepted for symmetry with add_host_to_firewall
            but not currently used by the removal command.
    """
    try:
        # removehost drops the IP from the firewall configuration; --apply
        # pushes the change immediately.
        firewall_cmd = ['/usr/sbin/so-firewall', '--apply', 'removehost', ip]
        proc = subprocess.Popen(
            firewall_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True
        )

        # Relay so-firewall output into our log as it arrives.
        for output_line in iter(proc.stdout.readline, ''):
            if output_line:
                logger.info(output_line.rstrip('\n'))

        proc.stdout.close()
        proc.wait()

        if proc.returncode == 0:
            logger.info(f"Successfully removed IP {ip} from firewall configuration")
        else:
            logger.error(f"Failed to remove IP {ip} from firewall configuration")

    except Exception as e:
        logger.error(f"Error during VM cleanup: {e}")
|
||||||
|
|
||||||
|
def delete_vm(profile, vm_name, assume_yes=False):
    """Delete a VM via salt-cloud and perform post-deletion cleanup.

    Args:
        profile (str): Cloud profile the VM was built from.
        vm_name (str): VM/minion name. NOTE(review): assumed to be of the
            form <host>_<role>; the role is taken from the second
            underscore-separated field — confirm against callers.
        assume_yes (bool): Pass -y to salt-cloud to skip its confirmation.

    Exits the process with status 1 when salt-cloud reports that no
    matching machines were found, or when it returns a non-zero code.
    """
    try:
        # Get VM's IP before deletion for cleanup — the pillar file is the
        # source of record and may be removed once the minion is destroyed.
        ip = get_vm_ip(vm_name)
        role = vm_name.split("_")[1]

        # Run salt-cloud destroy command
        cmd = ['salt-cloud', '-p', profile, vm_name, '-d']
        if assume_yes:
            cmd.append('-y')

        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True
        )

        # Pattern to detect when no machines were found to be destroyed
        no_machines_string = 'No machines were found to be destroyed'
        no_machines_pattern = re.compile(re.escape(no_machines_string))

        # Track if we found any successful destruction
        machines_destroyed = False
        output_lines = []

        # Monitor output, logging every line and capturing it for the
        # post-run pattern checks below.
        for line in iter(process.stdout.readline, ''):
            if line:
                logger.info(line.rstrip('\n'))
                output_lines.append(line.strip())

                # Check if no machines were found to be destroyed; stop
                # reading early in that case.
                if no_machines_pattern.search(line):
                    machines_destroyed = False
                    break

        process.stdout.close()
        process.wait()

        # If we hit the "No machines were found" case, it's a failure
        if no_machines_pattern.search('\n'.join(output_lines)):
            logger.error(f"VM {vm_name} was not found to be destroyed. Verify that all configured hypervisors are online.")
            sys.exit(1)

        # Check for successful destruction patterns in the output.
        # Look for the VM name appearing in libvirt section - this indicates successful processing
        full_output = '\n'.join(output_lines)
        if vm_name in full_output and 'libvirt:' in full_output:
            # VM was processed by libvirt, which means destruction was attempted.
            # If we reach here and didn't hit the "No machines found" case, it's success.
            machines_destroyed = True

        # Check success criteria: returncode == 0 AND we found evidence of destruction
        if process.returncode == 0 and machines_destroyed:
            # Start cleanup tasks only when actual deletion occurred
            cleanup_deleted_vm(ip, role)
            logger.info(f"Successfully deleted VM {vm_name}")
        elif process.returncode == 0:
            # Command succeeded but we couldn't confirm destruction - this is the edge case we're fixing.
            # If salt-cloud returned 0 and we didn't hit the "No machines found" case,
            # but we also don't see clear destruction evidence, we should still consider it success
            # because salt-cloud returning 0 means it completed successfully.
            cleanup_deleted_vm(ip, role)
            logger.info(f"Successfully deleted VM {vm_name} (salt-cloud completed successfully)")
        else:
            logger.error(f"Failed to delete VM {vm_name}")
            sys.exit(1)

    except Exception as e:
        logger.error(f"Failed to delete VM {vm_name}: {e}")
        raise
|
||||||
|
|
||||||
|
def _add_hypervisor_host_key(hostname):
    """Add a hypervisor's SSH host key to root's known_hosts file.

    Args:
        hostname (str): The hostname or IP of the hypervisor.

    Returns:
        bool: True if the key was added or already exists, False on error.
    """
    known_hosts = '/root/.ssh/known_hosts'
    try:
        # Ensure /root/.ssh exists before touching known_hosts.
        os.makedirs(os.path.dirname(known_hosts), exist_ok=True)

        # ssh-keygen -F reports whether this host already has an entry.
        if os.path.exists(known_hosts):
            lookup = subprocess.run(['ssh-keygen', '-F', hostname],
                                    capture_output=True, text=True)
            if lookup.returncode == 0 and lookup.stdout.strip():
                logger.info("Host key for %s already in known_hosts", hostname)
                return True

        # Fetch the (hashed, via -H) host key with ssh-keyscan.
        logger.info("Scanning host key for %s", hostname)
        scan = subprocess.run(['ssh-keyscan', '-H', hostname],
                              capture_output=True, text=True)

        # Guard clause: bail out if the scan produced nothing usable.
        if scan.returncode != 0 or not scan.stdout:
            logger.error("Failed to get host key for %s: %s",
                         hostname, scan.stderr)
            return False

        # Append the newly scanned key.
        with open(known_hosts, 'a') as kh:
            kh.write(scan.stdout)
        logger.info("Added host key for %s to known_hosts", hostname)
        return True

    except Exception as e:
        logger.error("Error adding host key for %s: %s", hostname, str(e))
        return False
|
||||||
|
|
||||||
|
def call_salt_cloud(profile, vm_name, destroy=False, assume_yes=False):
    """Call salt-cloud to create or destroy a VM.

    Args:
        profile (str): Cloud profile; the hypervisor name is the portion
            after the first '-' (e.g., sool9-jpphype1 -> jpphype1).
        vm_name (str): VM/minion name; the role is taken from the second
            underscore-separated field.
        destroy (bool): When True, delegate to delete_vm() and return.
        assume_yes (bool): Forwarded to delete_vm() for salt-cloud -y.

    During creation, salt-cloud output is streamed to the logger; when an
    '[INFO ] Address =' line appears, the IP is extracted and a background
    thread adds it to the firewall via add_host_to_firewall().
    NOTE(review): the creation path does not inspect salt-cloud's exit
    code — failures are only visible in the logged output.
    """
    try:
        if destroy:
            delete_vm(profile, vm_name, assume_yes)
            return

        # Extract hypervisor hostname from profile (e.g., sool9-jpphype1 -> jpphype1)
        hypervisor = profile.split('-', 1)[1] if '-' in profile else None
        if hypervisor:
            # salt-cloud connects over SSH; pre-trust the hypervisor's host
            # key so the deployment does not stall on a prompt.
            logger.info("Ensuring host key exists for hypervisor %s", hypervisor)
            if not _add_hypervisor_host_key(hypervisor):
                logger.error("Failed to add host key for %s, cannot proceed with VM creation", hypervisor)
                sys.exit(1)

        # Start the salt-cloud command as a subprocess
        process = subprocess.Popen(
            ['salt-cloud', '-p', profile, vm_name, '-l', 'info'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True
        )

        role = vm_name.split("_")[1]

        # Marker emitted by salt-cloud once the VM has been assigned an IP.
        ip_search_string = '[INFO ] Address ='
        ip_search_pattern = re.compile(re.escape(ip_search_string))

        # Continuously read the output from salt-cloud
        while True:
            # Read stdout line by line
            line = process.stdout.readline()
            if line:
                logger.info(line.rstrip('\n'))

                if ip_search_pattern.search(line):
                    parts = line.split("Address =")
                    if len(parts) > 1:
                        ip_address = parts[1].strip()
                        logger.info(f"Extracted IP address: {ip_address}")
                        # Create and start a thread to add host to firewall
                        # without blocking the output-streaming loop.
                        thread = threading.Thread(target=add_host_to_firewall, args=(ip_address, role))
                        thread.start()
                    else:
                        logger.error("No IP address found.")
            else:
                # Check if salt-cloud has terminated
                if process.poll() is not None:
                    break

        process.stdout.close()
        process.wait()

    except Exception as e:
        logger.error(f"An error occurred while calling salt-cloud: {e}")
|
||||||
|
|
||||||
|
def format_qcow2_output(operation, result):
    """Log the output of a qcow2 module operation in a readable form.

    Args:
        operation (str): The name of the operation (e.g., 'Network configuration', 'Hardware configuration')
        result (dict): Per-host result dictionary from the qcow2 module.

    Returns:
        None - logs the formatted output directly.
    """
    for host, host_result in result.items():
        # Non-dict results are logged verbatim.
        if not isinstance(host_result, dict):
            logger.info(f"{operation} result from {host}: {host_result}")
            continue

        # stderr carries the qcow2 module's detailed log; indent each line.
        if 'stderr' in host_result:
            logger.info(f"{operation} on {host}:")
            for line in host_result['stderr'].split('\n'):
                if line.strip():
                    logger.info(f"  {line.strip()}")
        # A non-zero retcode from the remote command is surfaced as an error.
        if host_result.get('retcode', 0) != 0:
            logger.error(f"{operation} failed on {host} with return code {host_result.get('retcode')}")
|
||||||
|
|
||||||
|
def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pci_list=None, start=False):
    """Apply CPU/memory/PCI hardware settings to a VM via the qcow2 module.

    Args:
        profile (str): Cloud profile name; the hypervisor name is the
            segment after the first '-' (e.g., sool9-hv1 -> hv1).
        vm_name (str): Name of the VM to modify.
        cpu (int, optional): Number of virtual CPUs to assign.
        memory (int, optional): Amount of memory to assign in MiB.
        pci_list (list, optional): PCI device IDs to pass through.
        start (bool): Whether to start the VM after configuration.
    """
    hv_name = profile.split('-')[1]
    # Target the hypervisor minion (minion IDs follow <host>_<role>).
    target = hv_name + "_*"

    try:
        # Only include arguments that were actually provided. The previous
        # implementation appended empty strings when cpu/memory were unset,
        # sending bogus empty positional args to qcow2.modify_hardware_config.
        args_list = ['vm_name=' + vm_name]
        if cpu:
            args_list.append('cpu=' + str(cpu))
        if memory:
            args_list.append('memory=' + str(memory))
        args_list.append('start=' + str(start))

        # Add PCI devices if provided, as a single comma-separated list.
        if pci_list:
            args_list.append('pci=' + ','.join(pci_list))

        result = local.cmd(target, 'qcow2.modify_hardware_config', args_list)
        format_qcow2_output('Hardware configuration', result)
    except Exception as e:
        logger.error(f"An error occurred while running qcow2.modify_hardware_config: {e}")
|
||||||
|
|
||||||
|
def run_qcow2_modify_network_config(profile, vm_name, mode, ip=None, gateway=None, dns=None, search_domain=None):
    """Configure a VM image's network settings via the qcow2 salt module.

    Args:
        profile (str): Cloud profile; hypervisor name is the segment after
            the first '-'.
        vm_name (str): Name of the VM being configured.
        mode (str): 'dhcp4' or 'static4'.
        ip, gateway, dns, search_domain (str, optional): Static IPv4
            settings; only sent when mode is not 'dhcp4'.
    """
    hv_name = profile.split('-')[1]
    target = hv_name + "_*"
    # Base image and interface are fixed for the sool9 template.
    image = '/nsm/libvirt/images/sool9/sool9.qcow2'
    interface = 'enp1s0'

    try:
        # Arguments common to both DHCP and static configurations.
        args = [
            'image=' + image,
            'interface=' + interface,
            'mode=' + mode,
            'vm_name=' + vm_name
        ]

        # Static mode carries the optional IPv4 settings; DHCP needs none.
        if mode != "dhcp4":
            for prefix, value in (('ip4=', ip),
                                  ('gw4=', gateway),
                                  ('dns4=', dns),
                                  ('search4=', search_domain)):
                if value:
                    args.append(prefix + value)

        result = local.cmd(target, 'qcow2.modify_network_config', args)
        format_qcow2_output('Network configuration', result)
    except Exception as e:
        logger.error(f"An error occurred while running qcow2.modify_network_config: {e}")
|
||||||
|
|
||||||
|
def parse_arguments():
    """Parse and validate the command-line arguments for so-salt-cloud.

    Returns:
        argparse.Namespace: the parsed arguments. Creation requests must
        specify a network mode; deletion requests need none.
    """
    parser = argparse.ArgumentParser(description="Call salt-cloud and pass the profile and VM name to it.")
    parser.add_argument('-p', '--profile', type=str, required=True, help="The cloud profile to build the VM from.")
    parser.add_argument('vm_name', type=str, help="The name of the VM.")
    parser.add_argument('-d', '--destroy', action='store_true', help='Delete the specified VM')
    parser.add_argument('-y', '--assume-yes', action='store_true', help='Default yes in answer to all confirmation questions')

    # Network options live in their own group; DHCP and static modes are
    # mutually exclusive but neither is required up front, since a destroy
    # request needs no network configuration.
    network_group = parser.add_argument_group('Network Configuration')
    mode_group = network_group.add_mutually_exclusive_group()
    mode_group.add_argument("--dhcp4", action="store_true", help="Configure interface for DHCP (IPv4).")
    mode_group.add_argument("--static4", action="store_true", help="Configure interface for static IPv4 settings.")

    # Remaining network and hardware options.
    network_group.add_argument("--ip4", help="IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.")
    network_group.add_argument("--gw4", help="IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.")
    network_group.add_argument("--dns4", help="Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).")
    network_group.add_argument("--search4", help="DNS search domain for IPv4.")
    network_group.add_argument('-c', '--cpu', type=int, help='Number of virtual CPUs to assign.')
    network_group.add_argument('-m', '--memory', type=int, help='Amount of memory to assign in MiB.')
    network_group.add_argument('-P', '--pci', action='append', help='PCI hardware ID(s) to passthrough to the VM (e.g., 0000:c7:00.0). Can be specified multiple times.')

    args = parser.parse_args()

    # Validation applies to creation only; deletion skips network checks.
    if not args.destroy:
        if not (args.dhcp4 or args.static4):
            parser.error("One of --dhcp4 or --static4 is required for VM creation")
        if args.static4 and not (args.ip4 and args.gw4):
            parser.error("Both --ip4 and --gw4 are required for static IPv4 configuration")

    return args
|
||||||
|
|
||||||
|
def main():
    """Entry point: parse arguments, then create or destroy the requested VM.

    Creation runs three phases: network configuration (qcow2 module),
    provisioning (salt-cloud), then hardware configuration with start=True.
    Deletion delegates entirely to call_salt_cloud(destroy=True).
    Exits with status 1 on interruption or any unhandled error.
    """
    try:
        args = parse_arguments()

        # Log the initial request
        if args.destroy:
            logger.info(f"Received request to destroy VM '{args.vm_name}' using profile '{args.profile}'{' with --assume-yes' if args.assume_yes else ''}")
        else:
            # Build network config string for the audit log.
            network_config = "using DHCP" if args.dhcp4 else f"with static IP {args.ip4}, gateway {args.gw4}"
            if args.dns4:
                network_config += f", DNS {args.dns4}"
            if args.search4:
                network_config += f", search domain {args.search4}"

            # Build hardware config string for the audit log.
            hw_config = []
            if args.cpu:
                hw_config.append(f"{args.cpu} CPUs")
            if args.memory:
                hw_config.append(f"{args.memory}MB RAM")
            if args.pci:
                hw_config.append(f"PCI devices: {', '.join(args.pci)}")
            hw_string = f" and hardware config: {', '.join(hw_config)}" if hw_config else ""

            logger.info(f"Received request to create VM '{args.vm_name}' using profile '{args.profile}' {network_config}{hw_string}")

        if args.destroy:
            # Handle VM deletion
            call_salt_cloud(args.profile, args.vm_name, destroy=True, assume_yes=args.assume_yes)
        else:
            # Handle VM creation
            if args.dhcp4:
                mode = "dhcp4"
            elif args.static4:
                mode = "static4"
            else:
                mode = "dhcp4"  # Default to DHCP if not specified

            # Step 1: Modify network configuration
            run_qcow2_modify_network_config(args.profile, args.vm_name, mode, args.ip4, args.gw4, args.dns4, args.search4)

            # Step 2: Provision the VM (without starting it)
            call_salt_cloud(args.profile, args.vm_name)

            # Step 3: Modify hardware configuration (and start the VM)
            run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=args.pci, start=True)

    except KeyboardInterrupt:
        logger.error("so-salt-cloud: Operation cancelled by user.")
        sys.exit(1)
    except Exception as e:
        logger.error(f"so-salt-cloud: An error occurred: {e}")
        sys.exit(1)
|
||||||
|
|
||||||
|
# Script entry point.
if __name__ == "__main__":
    main()
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif -%}
|
||||||
@@ -123,7 +123,7 @@ so-nginx:
|
|||||||
- /opt/so/tmp/nginx/:/run:rw
|
- /opt/so/tmp/nginx/:/run:rw
|
||||||
- /nsm/elastic-fleet/so_agent-installers/:/opt/socore/html/packages
|
- /nsm/elastic-fleet/so_agent-installers/:/opt/socore/html/packages
|
||||||
- /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
|
- /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
|
||||||
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
|
{% if GLOBALS.is_manager %}
|
||||||
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
|
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
|
||||||
- /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
|
- /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
|
||||||
# ATT&CK Navigator binds
|
# ATT&CK Navigator binds
|
||||||
@@ -156,7 +156,7 @@ so-nginx:
|
|||||||
- file: nginxconfdir
|
- file: nginxconfdir
|
||||||
- require:
|
- require:
|
||||||
- file: nginxconf
|
- file: nginxconf
|
||||||
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
|
{% if GLOBALS.is_manager %}
|
||||||
{% if NGINXMERGED.ssl.replace_cert %}
|
{% if NGINXMERGED.ssl.replace_cert %}
|
||||||
- file: managerssl_key
|
- file: managerssl_key
|
||||||
- file: managerssl_crt
|
- file: managerssl_crt
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ http {
|
|||||||
|
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
|
{%- if GLOBALS.is_manager %}
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 80 default_server;
|
listen 80 default_server;
|
||||||
@@ -108,7 +108,7 @@ http {
|
|||||||
|
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
|
{%- if GLOBALS.is_manager %}
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 7788;
|
listen 7788;
|
||||||
|
|||||||
130
salt/orch/dyanno_hypervisor.sls
Normal file
130
salt/orch/dyanno_hypervisor.sls
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{# Orchestration: translate reactor events (SOC dyanno, so-minion setup,
   salt-cloud lifecycle) into VM status writes and hypervisor annotation
   updates on the manager-class nodes. #}
{% if 'vrt' in salt['pillar.get']('features', []) %}

{% do salt.log.info('dyanno_hypervisor_orch: Running') %}
{% set vm_name = None %}
{% set hypervisor = None %}
{% set status = None %}
{% set data = pillar.get('data', {}) %}
{% set tag = pillar.get('tag', '') %}
{% set timestamp = data.get('_stamp') %}
{% do salt.log.debug('dyanno_hypervisor_orch: tag: ' ~ tag) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Received data: ' ~ data|json|string) %}

{# Macro to find hypervisor name from VM status file: the hypervisor is the
   directory under hosts/ that contains <vm_name>.status. Returns '' when
   zero or multiple matches are found. #}
{% macro find_hypervisor_from_status(vm_name) -%}
{%- set path = salt['file.find']('/opt/so/saltstack/local/salt/hypervisor/hosts/',type='f', name=vm_name ~ '.status') -%}
{%- if path | length == 1 -%}
{%- set parts = path[0].split('/') -%}
{%- set hypervisor = parts[-2] -%}
{%- do salt.log.debug('dyanno_hypervisor_orch: Found hypervisor from file.find: ' ~ hypervisor) -%}
{{- hypervisor -}}
{%- elif path | length == 0 -%}
{%- do salt.log.error('dyanno_hypervisor_orch: ' ~ vm_name ~ ' not found in any hypervisor directories') -%}
{{- '' -}}
{%- else -%}
{%- do salt.log.error('dyanno_hypervisor_orch: Found ' ~ vm_name ~ ' in multiple hypervisor directories: ' ~ path | string) -%}
{{- '' -}}
{%- endif -%}
{%- endmacro %}

{# Our custom tag #}
{% if tag.startswith('soc/dyanno/hypervisor') %}
{# NOTE(review): status_data (data['data']) is assumed present for these
   tags; a missing key would make the .get calls below fail at render. #}
{% set status_data = data.get('data')%}
{% do salt.log.debug('dyanno_hypervisor_orch: Received data: ' ~ status_data|json|string) %}
{% if not tag.endswith('/baseDomain') %}
{% do salt.log.debug('dyanno_hypervisor_orch: Setting vm_name, hypervisor and status') %}
{% set vm_name = status_data.get('vm_name') %}
{% set hypervisor = status_data.get('hypervisor') %}
{% else %}
{# baseDomain events carry the hypervisor as the sending minion id. #}
{% set hypervisor = data.get('id') %}
{% endif %}
{% set status = status_data.get('status') %}
{% endif %}

{# setup/so-minion tag #}
{% if tag == ('setup/so-minion') %}
{% set status_data = data.get('data')%}
{% do salt.log.debug('dyanno_hypervisor_orch: Received data: ' ~ status_data|json|string) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Setting vm_name, hypervisor and status') %}
{% set vm_name = data.get('id') %}
{% set hypervisor = find_hypervisor_from_status(vm_name) %}
{% set status = 'Initialize Minion Pillars' %}
{% endif %}


{# salt-cloud tag #}
{% if tag.startswith('salt/cloud/') and (tag.endswith('/creating') or tag.endswith('/deploying') or tag.endswith('/created') or tag.endswith('/destroyed')) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Received data: ' ~ data|json|string) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Setting vm_name, hypervisor and status') %}
{% set vm_name = tag.split('/')[2] %}
{% do salt.log.debug('dyanno_hypervisor_orch: Got vm_name from tag: ' ~ vm_name) %}
{% if tag.endswith('/deploying') %}
{# NOTE(review): this value is unconditionally overwritten by the if/else
   block below — confirm whether the kwargs-derived hypervisor should win. #}
{% set hypervisor = data.get('kwargs').get('cloud_grains').get('profile').split('-')[1] %}
{% endif %}
{# Set the hypervisor #}
{# First try to get it from the event #}
{% if data.get('profile', False) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Did not get cache.grains.') %}
{% set hypervisor = data.profile.split('-')[1] %}
{% do salt.log.debug('dyanno_hypervisor_orch: Got hypervisor from data: ' ~ hypervisor) %}
{% else %}
{% set hypervisor = find_hypervisor_from_status(vm_name) %}
{% endif %}
{# NOTE(review): assumes the event payload always includes 'event' —
   data.get('event').title() fails at render when it is absent. #}
{% set status = data.get('event').title() %}
{% endif %}

{% do salt.log.info('dyanno_hypervisor_orch: vm_name: ' ~ vm_name ~ ' hypervisor: ' ~ hypervisor ~ ' status: ' ~ status) %}

{# Only write a per-VM status entry when every field was resolved. #}
{% if vm_name and hypervisor and timestamp and status and tag %}
write_vm_status:
  salt.state:
    - tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone or G@role:so-eval'
    - tgt_type: compound
    - sls:
      - soc.dyanno.hypervisor.write_status
    - concurrent: True
    - pillar:
        vm_name: {{ vm_name }}
        hypervisor: {{ hypervisor }}
        status_data:
          timestamp: {{ timestamp }}
          status: {{ status }}
          event_tag: {{ tag }}
{% endif %}

# Update hypervisor status
update_hypervisor_annotation:
  salt.state:
    - tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone or G@role:so-eval'
    - tgt_type: compound
    - sls:
      - soc.dyanno.hypervisor
    - concurrent: True
{% if tag == ('soc/dyanno/hypervisor/baseDomain') %}
    - pillar:
        baseDomain:
          status: {{ status }}
{% endif %}

{% do salt.log.info('dyanno_hypervisor_orch: Completed') %}

{% else %}

{# NOTE(review): these adjacent string literals concatenate without
   separating spaces, producing 'license.Contact' in the logged message. #}
{% do salt.log.error(
  'Hypervisor nodes are a feature supported only for customers with a valid license.'
  'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com'
  'for more information about purchasing a license to enable this feature.'
) %}

{% endif %}
|
||||||
35
salt/orch/vm_pillar_clean.sls
Normal file
35
salt/orch/vm_pillar_clean.sls
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{# Orchestration: remove a deleted VM's pillar files (both the standard and
   the adv_ advanced pillar) from the local minions pillar directory. #}
{% if 'vrt' in salt['pillar.get']('features', []) %}

{% do salt.log.debug('vm_pillar_clean_orch: Running') %}
{% set vm_name = pillar.get('vm_name') %}

delete_adv_{{ vm_name }}_pillar:
  module.run:
    - file.remove:
      - path: /opt/so/saltstack/local/pillar/minions/adv_{{ vm_name }}.sls

delete_{{ vm_name }}_pillar:
  module.run:
    - file.remove:
      - path: /opt/so/saltstack/local/pillar/minions/{{ vm_name }}.sls

{% else %}

{# Fix: the original adjacent string literals lacked separating spaces,
   logging 'license.Contact ... securityonionsolutions.comfor ...'. #}
{% do salt.log.error(
  'Hypervisor nodes are a feature supported only for customers with a valid license. '
  'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com '
  'for more information about purchasing a license to enable this feature.'
) %}

{% endif %}
|
||||||
5
salt/reactor/check_hypervisor.sls
Normal file
5
salt/reactor/check_hypervisor.sls
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{# Reactor: when a minion whose id marks it as a hypervisor is accepted,
   run the setup_hypervisor runner to prepare its environment. #}
{# NOTE(review): the '_managerhyper' suffix does not match the
   'so-managerhype' role naming used elsewhere in this changeset —
   confirm the expected minion-id suffix. #}
{% if data['act'] == 'accept' and data['id'].endswith(('_hypervisor', '_managerhyper')) and data['result'] == True %}
check_and_trigger:
  runner.setup_hypervisor.setup_environment:
    - minion_id: {{ data['id'] }}
{% endif %}
|
||||||
38
salt/reactor/createEmptyPillar.sls
Normal file
38
salt/reactor/createEmptyPillar.sls
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
#!py
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
|
||||||
|
def run():
    """Reactor: create empty pillar files for a newly provisioned VM.

    Reads the VM name from the reactor-injected `data` dict at
    data['kwargs']['name'] and creates <vm>.sls and adv_<vm>.sls under the
    local pillar minions directory, owned by socore:socore with mode 640.

    Returns:
        dict: always empty — this reactor schedules no states.
    """
    vm_name = data['kwargs']['name']
    logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
    pillar_root = '/opt/so/saltstack/local/pillar/minions/'
    pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']

    try:
        # Get socore user and group IDs for ownership.
        socore_uid = pwd.getpwnam('socore').pw_uid
        socore_gid = grp.getgrnam('socore').gr_gid

        for f in pillar_files:
            full_path = pillar_root + f
            if not os.path.exists(full_path):
                # Create empty file
                os.mknod(full_path)
                # Set ownership to socore:socore
                os.chown(full_path, socore_uid, socore_gid)
                # Set mode to 640 (rw-r-----). The original comment and log
                # message claimed 644, but the chmod has always applied 0o640;
                # the message now matches the actual permissions.
                os.chmod(full_path, 0o640)
                logging.error("createEmptyPillar reactor: created %s with socore:socore ownership and mode 640" % f)

    except (KeyError, OSError) as e:
        logging.error("createEmptyPillar reactor: Error setting ownership/permissions: %s" % str(e))

    return {}
|
||||||
18
salt/reactor/deleteKey.sls
Normal file
18
salt/reactor/deleteKey.sls
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
remove_key:
|
||||||
|
wheel.key.delete:
|
||||||
|
- args:
|
||||||
|
- match: {{ data['name'] }}
|
||||||
|
|
||||||
|
{{ data['name'] }}_pillar_clean:
|
||||||
|
runner.state.orchestrate:
|
||||||
|
- args:
|
||||||
|
- mods: orch.vm_pillar_clean
|
||||||
|
- pillar:
|
||||||
|
vm_name: {{ data['name'] }}
|
||||||
|
|
||||||
|
{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}
|
||||||
45
salt/reactor/sominion_setup.sls
Normal file
45
salt/reactor/sominion_setup.sls
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
#!py
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from subprocess import call
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
def run():
    """Reactor: provision a new SO minion VM via /usr/sbin/so-minion.

    Builds an ``addVM`` command line from the reactor-injected global
    ``data`` (event payload posted on the 'setup/so-minion' tag) and
    executes it through a shell.

    Returns:
        dict: Always empty; this reactor only has side effects.
    """
    log.info('sominion_setup_reactor: Running')
    minionid = data['id']
    DATA = data['data']
    # HYPERVISOR_HOST is read (and thereby required) even though it is not
    # used below; a missing key fails the reactor early with a KeyError.
    hv_name = DATA['HYPERVISOR_HOST']
    log.info('sominion_setup_reactor: DATA: %s' % DATA)

    # Event data originates from minions; quote every interpolated value
    # before the command line reaches a shell (shell=True below) to
    # prevent command injection via crafted event fields.
    from shlex import quote

    # Build the base command
    cmd = (
        "NODETYPE=" + quote(DATA['NODETYPE'])
        + " /usr/sbin/so-minion -o=addVM"
        + " -m=" + quote(minionid)
        + " -n=" + quote(DATA['MNIC'])
        + " -i=" + quote(DATA['MAINIP'])
        + " -c=" + quote(str(DATA['CPUCORES']))
        + " -d=" + quote(DATA['NODE_DESCRIPTION'])
    )

    # Add optional arguments only if they exist in DATA
    if 'CORECOUNT' in DATA:
        cmd += " -C=" + quote(str(DATA['CORECOUNT']))

    if 'INTERFACE' in DATA:
        cmd += " -a=" + quote(DATA['INTERFACE'])

    if 'ES_HEAP_SIZE' in DATA:
        cmd += " -e=" + quote(DATA['ES_HEAP_SIZE'])

    if 'LS_HEAP_SIZE' in DATA:
        cmd += " -l=" + quote(DATA['LS_HEAP_SIZE'])

    if 'LSHOSTNAME' in DATA:
        cmd += " -L=" + quote(DATA['LSHOSTNAME'])

    log.info('sominion_setup_reactor: Command: %s' % cmd)
    rc = call(cmd, shell=True)

    log.info('sominion_setup_reactor: rc: %s' % rc)

    return {}
|
||||||
36
salt/reactor/vm_status.sls
Normal file
36
salt/reactor/vm_status.sls
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% do salt.log.debug('vm_status_reactor: Running') %}
|
||||||
|
{% do salt.log.debug('vm_status_reactor: tag: ' ~ tag) %}
|
||||||
|
|
||||||
|
{# Remove all the nasty characters that exist in this data #}
|
||||||
|
{% if tag.startswith('salt/cloud/') and tag.endswith('/deploying') %}
|
||||||
|
|
||||||
|
{% set event_data = {
|
||||||
|
"_stamp": data._stamp,
|
||||||
|
"event": data.event,
|
||||||
|
"kwargs": {
|
||||||
|
"cloud_grains": data.kwargs.cloud_grains
|
||||||
|
}
|
||||||
|
} %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{% set event_data = data %}
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% do salt.log.debug('vm_status_reactor: Received data: ' ~ event_data|json|string) %}
|
||||||
|
|
||||||
|
update_hypervisor:
|
||||||
|
runner.state.orchestrate:
|
||||||
|
- args:
|
||||||
|
- mods: orch.dyanno_hypervisor
|
||||||
|
- pillar:
|
||||||
|
tag: {{ tag }}
|
||||||
|
data: {{ event_data }}
|
||||||
|
|
||||||
|
{% do salt.log.debug('vm_status_reactor: Completed') %}
|
||||||
@@ -49,7 +49,7 @@ so_repo:
|
|||||||
pkgrepo.managed:
|
pkgrepo.managed:
|
||||||
- name: securityonion
|
- name: securityonion
|
||||||
- humanname: Security Onion Repo
|
- humanname: Security Onion Repo
|
||||||
{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-import', 'so-manager', 'so-managersearch'] %}
|
{% if GLOBALS.is_manager %}
|
||||||
- baseurl: file:///nsm/repo/
|
- baseurl: file:///nsm/repo/
|
||||||
{% else %}
|
{% else %}
|
||||||
- baseurl: https://{{ GLOBALS.repo_host }}/repo
|
- baseurl: https://{{ GLOBALS.repo_host }}/repo
|
||||||
|
|||||||
51
salt/salt/cloud/cloud.profiles.d/socloud.conf.jinja
Normal file
51
salt/salt/cloud/cloud.profiles.d/socloud.conf.jinja
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{#- Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{%- for role, hosts in HYPERVISORS.items() %}
|
||||||
|
{%- for host in hosts.keys() %}
|
||||||
|
|
||||||
|
sool9-{{host}}:
|
||||||
|
provider: kvm-ssh-{{host}}
|
||||||
|
base_domain: sool9
|
||||||
|
ip_source: qemu-agent
|
||||||
|
ssh_username: soqemussh
|
||||||
|
private_key: /etc/ssh/auth_keys/soqemussh/id_ed25519
|
||||||
|
sudo: True
|
||||||
|
deploy_command: sh /tmp/.saltcloud-*/deploy.sh
|
||||||
|
script_args: -r -F -x python3 stable 3006.9
|
||||||
|
minion:
|
||||||
|
master: {{ grains.host }}
|
||||||
|
master_port: 4506
|
||||||
|
use_superseded:
|
||||||
|
- module.run
|
||||||
|
features:
|
||||||
|
x509_v2: true
|
||||||
|
log_level: info
|
||||||
|
log_level_logfile: info
|
||||||
|
log_file: /opt/so/log/salt/minion
|
||||||
|
grains:
|
||||||
|
hypervisor_host: {{host ~ "_" ~ role}}
|
||||||
|
preflight_cmds:
|
||||||
|
- |
|
||||||
|
tee -a /etc/hosts <<< "{{ MANAGERIP }} {{ MANAGERHOSTNAME }}"
|
||||||
|
- |
|
||||||
|
timeout 600 bash -c 'trap "echo \"Preflight Check: Failed to establish repo connectivity\"; exit 1" TERM; \
|
||||||
|
while ! dnf makecache --repoid=securityonion >/dev/null 2>&1; do echo "Preflight Check: Waiting for repo connectivity..."; \
|
||||||
|
sleep 5; done && echo "Preflight Check: Successfully connected to repo" || exit 1; [ $? -eq 0 ]'
|
||||||
|
# the destination directory will be created if it doesn't exist
|
||||||
|
#file_map:
|
||||||
|
# /opt/so/saltstack/default/salt/salt/mine_functions.sls: /opt/so/conf/salt/cloud_file_map/salt/salt/mine_functions.sls
|
||||||
|
# if calling states with pillar values, need to pass them in since minion pillars are not set until setup.virt.sominion state runs
|
||||||
|
inline_script:
|
||||||
|
- |
|
||||||
|
salt-call state.apply salt.mine_functions \
|
||||||
|
pillar='{"host": {"mainint": "enp1s0"}}'
|
||||||
|
- salt-call mine.update
|
||||||
|
- |
|
||||||
|
salt-call state.apply setup.virt \
|
||||||
|
pillar='{"host": {"mainint": "enp1s0"}}'
|
||||||
|
|
||||||
|
{%- endfor %}
|
||||||
|
{%- endfor %}
|
||||||
22
salt/salt/cloud/cloud.providers.d/libvirt.conf.jinja
Normal file
22
salt/salt/cloud/cloud.providers.d/libvirt.conf.jinja
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{#- provider with qemu+ssh protocol #}
|
||||||
|
{%- for role, hosts in HYPERVISORS.items() %}
|
||||||
|
{%- for host in hosts.keys() %}
|
||||||
|
|
||||||
|
kvm-ssh-{{host}}:
|
||||||
|
driver: libvirt
|
||||||
|
url: qemu+ssh://soqemussh@{{host}}/system?socket=/var/run/libvirt/libvirt-sock
|
||||||
|
|
||||||
|
{%- endfor %}
|
||||||
|
{%- endfor %}
|
||||||
|
|
||||||
|
{#- local libvirt instance #}
|
||||||
|
#local-kvm:
|
||||||
|
# driver: libvirt
|
||||||
|
# url: qemu:///system
|
||||||
|
{#- work around flag for XML validation errors while cloning #}
|
||||||
|
# validate_xml: no
|
||||||
55
salt/salt/cloud/config.sls
Normal file
55
salt/salt/cloud/config.sls
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if '.'.join(sls.split('.')[:2]) in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
|
||||||
|
|
||||||
|
{% if HYPERVISORS %}
|
||||||
|
cloud_providers:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/cloud.providers.d/libvirt.conf
|
||||||
|
- source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
|
||||||
|
- defaults:
|
||||||
|
HYPERVISORS: {{HYPERVISORS}}
|
||||||
|
- template: jinja
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
cloud_profiles:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/cloud.profiles.d/socloud.conf
|
||||||
|
- source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
|
||||||
|
- defaults:
|
||||||
|
HYPERVISORS: {{HYPERVISORS}}
|
||||||
|
MANAGERHOSTNAME: {{ grains.host }}
|
||||||
|
MANAGERIP: {{ pillar.host.mainip }}
|
||||||
|
- template: jinja
|
||||||
|
- makedirs: True
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
43
salt/salt/cloud/init.sls
Normal file
43
salt/salt/cloud/init.sls
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
{% from 'salt/map.jinja' import SALTVERSION %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- libvirt.packages
|
||||||
|
- libvirt.64962
|
||||||
|
- libvirt.ssh.users
|
||||||
|
|
||||||
|
install_salt_cloud:
|
||||||
|
pkg.installed:
|
||||||
|
- name: salt-cloud
|
||||||
|
- version: {{SALTVERSION}}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
61
salt/salt/cloud/reactor_config_hypervisor.sls
Normal file
61
salt/salt/cloud/reactor_config_hypervisor.sls
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[:2]|join('.') in allowed_states %}
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
reactor_config_hypervisor:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/master.d/reactor_hypervisor.conf
|
||||||
|
- contents: |
|
||||||
|
reactor:
|
||||||
|
- 'salt/key':
|
||||||
|
- salt://reactor/check_hypervisor.sls
|
||||||
|
- 'salt/cloud/*/creating':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- 'salt/cloud/*/deploying':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/createEmptyPillar.sls
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- 'setup/so-minion':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/sominion_setup.sls
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- 'salt/cloud/*/created':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- 'soc/dyanno/hypervisor/*':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- 'salt/cloud/*/destroyed':
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/deleteKey.sls
|
||||||
|
- /opt/so/saltstack/default/salt/reactor/vm_status.sls
|
||||||
|
- user: root
|
||||||
|
- group: root
|
||||||
|
- mode: 644
|
||||||
|
- makedirs: True
|
||||||
|
- watch_in:
|
||||||
|
- service: salt_master_service
|
||||||
|
- order: last
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
985
salt/salt/engines/master/virtual_node_manager.py
Normal file
985
salt/salt/engines/master/virtual_node_manager.py
Normal file
@@ -0,0 +1,985 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
"""
|
||||||
|
Salt Engine for Virtual Node Management
|
||||||
|
|
||||||
|
This engine manages the automated provisioning of virtual machines in Security Onion's
|
||||||
|
virtualization infrastructure. It processes VM configurations from a VMs file and handles
|
||||||
|
the entire provisioning process including hardware allocation, state tracking, and file ownership.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
engines:
|
||||||
|
- virtual_node_manager:
|
||||||
|
interval: 30
|
||||||
|
base_path: /opt/so/saltstack/local/salt/hypervisor/hosts
|
||||||
|
|
||||||
|
Options:
|
||||||
|
interval: Time in seconds between engine runs (managed by salt-master, default: 30)
|
||||||
|
base_path: Base directory containing hypervisor configurations (default: /opt/so/saltstack/local/salt/hypervisor/hosts)
|
||||||
|
|
||||||
|
Memory values in VM configuration should be specified in GB. These values
|
||||||
|
will automatically be converted to MiB when passed to so-salt-cloud.
|
||||||
|
|
||||||
|
Configuration Files:
|
||||||
|
<hypervisorHostname>VMs: JSON file containing VM configurations
|
||||||
|
- Located at <base_path>/<hypervisorHostname>VMs
|
||||||
|
- Contains array of VM configurations
|
||||||
|
- Each VM config specifies hardware and network settings
|
||||||
|
- Hardware indices (disk, copper, sfp) must be specified as JSON arrays: "disk":["1","2"]
|
||||||
|
|
||||||
|
defaults.yaml: Hardware capabilities configuration
|
||||||
|
- Located at /opt/so/saltstack/default/salt/hypervisor/defaults.yaml
|
||||||
|
- Defines available hardware per model
|
||||||
|
- Maps hardware indices to PCI IDs
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. Basic Configuration:
|
||||||
|
engines:
|
||||||
|
- virtual_node_manager: {}
|
||||||
|
|
||||||
|
Uses default settings to process VM configurations.
|
||||||
|
|
||||||
|
2. Custom Interval:
|
||||||
|
engines:
|
||||||
|
- virtual_node_manager:
|
||||||
|
interval: 60
|
||||||
|
|
||||||
|
Processes configurations every 60 seconds.
|
||||||
|
|
||||||
|
State Files:
|
||||||
|
VM Tracking Files:
|
||||||
|
- <vm_name>: Active VM with status 'creating' or 'running'
|
||||||
|
- <vm_name>.error: Error state with detailed message
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Requires 'vrt' feature license
|
||||||
|
- Uses hypervisor's sosmodel grain for hardware capabilities
|
||||||
|
- Hardware allocation based on model-specific configurations
|
||||||
|
- All created files maintain socore ownership
|
||||||
|
- Comprehensive logging for troubleshooting
|
||||||
|
- Single engine-wide lock prevents concurrent instances
|
||||||
|
- Lock remains if error occurs (requires admin intervention)
|
||||||
|
|
||||||
|
Description:
|
||||||
|
The engine operates in the following phases:
|
||||||
|
|
||||||
|
1. Lock Acquisition
|
||||||
|
- Acquires single engine-wide lock
|
||||||
|
- Prevents multiple instances from running
|
||||||
|
- Lock remains until clean shutdown or error
|
||||||
|
|
||||||
|
2. License Validation
|
||||||
|
- Verifies 'vrt' feature is licensed
|
||||||
|
- Prevents operation if license is invalid
|
||||||
|
|
||||||
|
3. Configuration Processing
|
||||||
|
- Reads VMs file for each hypervisor
|
||||||
|
- Validates configuration parameters
|
||||||
|
- Compares against existing VM tracking files
|
||||||
|
|
||||||
|
4. Hardware Allocation
|
||||||
|
- Retrieves hypervisor model from grains cache
|
||||||
|
- Loads model-specific hardware capabilities
|
||||||
|
- Validates hardware requests against model limits
|
||||||
|
- Converts hardware indices to PCI IDs
|
||||||
|
- Ensures proper type handling for hardware indices
|
||||||
|
- Creates state tracking files with socore ownership
|
||||||
|
|
||||||
|
5. VM Provisioning
|
||||||
|
- Executes so-salt-cloud with validated configuration
|
||||||
|
- Handles network setup (static/DHCP)
|
||||||
|
- Configures hardware passthrough with converted PCI IDs
|
||||||
|
- Updates VM state tracking
|
||||||
|
|
||||||
|
Lock Management:
|
||||||
|
- Lock acquired at engine start
|
||||||
|
- Released only on clean shutdown
|
||||||
|
- Remains if error occurs
|
||||||
|
- Admin must restart service to clear lock
|
||||||
|
- Error-level logging for lock issues
|
||||||
|
|
||||||
|
Exit Codes:
|
||||||
|
0: Success
|
||||||
|
1: Invalid license
|
||||||
|
2: Configuration error
|
||||||
|
3: Hardware validation failure (hardware doesn't exist in model or is already in use by another VM)
|
||||||
|
4: VM provisioning failure (so-salt-cloud execution failed)
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
Log files are written to /opt/so/log/salt/engines/virtual_node_manager.log
|
||||||
|
Comprehensive logging includes:
|
||||||
|
- Hardware validation details
|
||||||
|
- PCI ID conversion process
|
||||||
|
- Command execution details
|
||||||
|
- Error conditions with full context
|
||||||
|
- File ownership operations
|
||||||
|
- Lock file management
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
import yaml
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
import salt.config
|
||||||
|
import salt.runner
|
||||||
|
from typing import Dict, List, Optional, Tuple, Any
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from threading import Lock
|
||||||
|
|
||||||
|
# Get socore uid/gid
|
||||||
|
SOCORE_UID = pwd.getpwnam('socore').pw_uid
|
||||||
|
SOCORE_GID = grp.getgrnam('socore').gr_gid
|
||||||
|
|
||||||
|
# Initialize Salt runner once
|
||||||
|
opts = salt.config.master_config('/etc/salt/master')
|
||||||
|
opts['output'] = 'json'
|
||||||
|
runner = salt.runner.RunnerClient(opts)
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
log.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
# Constants
|
||||||
|
DEFAULT_INTERVAL = 30
|
||||||
|
DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts'
|
||||||
|
VALID_ROLES = ['sensor', 'searchnode', 'idh', 'receiver', 'heavynode', 'fleet']
|
||||||
|
LICENSE_PATH = '/opt/so/saltstack/local/pillar/soc/license.sls'
|
||||||
|
DEFAULTS_PATH = '/opt/so/saltstack/default/salt/hypervisor/defaults.yaml'
|
||||||
|
# Define the retention period for destroyed VMs (in hours)
|
||||||
|
DESTROYED_VM_RETENTION_HOURS = 48
|
||||||
|
|
||||||
|
# Single engine-wide lock for virtual node manager
|
||||||
|
engine_lock = Lock()
|
||||||
|
|
||||||
|
def read_json_file(file_path: str) -> Any:
|
||||||
|
"""
|
||||||
|
Read and parse a JSON file.
|
||||||
|
Returns an empty array if the file is empty.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(file_path, 'r') as f:
|
||||||
|
content = f.read().strip()
|
||||||
|
if not content:
|
||||||
|
return []
|
||||||
|
return json.loads(content)
|
||||||
|
except Exception as e:
|
||||||
|
log.error("Failed to read JSON file %s: %s", file_path, str(e))
|
||||||
|
raise
|
||||||
|
|
||||||
|
def set_socore_ownership(path: str) -> None:
    """Chown ``path`` to the socore user and group, logging any failure."""
    try:
        os.chown(path, SOCORE_UID, SOCORE_GID)
        log.debug("Set socore ownership on %s", path)
    except Exception as exc:
        log.error("Failed to set socore ownership on %s: %s", path, str(exc))
        raise
|
||||||
|
|
||||||
|
def write_json_file(file_path: str, data: Any) -> None:
    """Serialize ``data`` as indented JSON to ``file_path``.

    Parent directories are created on demand and the resulting file is
    chowned to socore:socore. Failures are logged and re-raised.
    """
    try:
        # Ensure the destination directory exists before writing.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'w') as handle:
            json.dump(data, handle, indent=2)
        set_socore_ownership(file_path)
    except Exception as exc:
        log.error("Failed to write JSON file %s: %s", file_path, str(exc))
        raise
|
||||||
|
|
||||||
|
def read_yaml_file(file_path: str) -> dict:
    """Parse a YAML document from ``file_path`` and return its contents."""
    try:
        with open(file_path, 'r') as handle:
            parsed = yaml.safe_load(handle)
        return parsed
    except Exception as exc:
        log.error("Failed to read YAML file %s: %s", file_path, str(exc))
        raise
|
||||||
|
def convert_pci_id(pci_id: str) -> str:
    """
    Convert PCI ID from pci_0000_c7_00_0 format to 0000:c7:00.0 format.

    Args:
        pci_id: PCI ID in underscore format (e.g., pci_0000_c7_00_0)

    Returns:
        PCI ID in domain:bus:slot.function format (e.g., 0000:c7:00.0)

    Raises:
        ValueError: If the ID does not consist of exactly four
            underscore-separated components after the optional
            'pci_' prefix is removed.

    Example:
        >>> convert_pci_id('pci_0000_c7_00_0')
        '0000:c7:00.0'
    """
    try:
        # Strip only a leading 'pci_' prefix. The previous
        # str.replace('pci_', '') removed the token anywhere in the
        # string, which could silently mangle a malformed ID instead of
        # rejecting it via the length check below.
        if pci_id.startswith('pci_'):
            pci_id = pci_id[len('pci_'):]

        # Split into domain / bus / slot / function components.
        parts = pci_id.split('_')
        if len(parts) != 4:
            raise ValueError(f"Invalid PCI ID format: {pci_id}. Expected format: pci_domain_bus_slot_function")

        # Reconstruct with proper format (using period for function)
        domain, bus, slot, function = parts
        return f"{domain}:{bus}:{slot}.{function}"
    except Exception as e:
        log.error("Failed to convert PCI ID %s: %s", pci_id, str(e))
        raise
|
||||||
|
|
||||||
|
def parse_hardware_indices(hw_value: Any) -> List[int]:
    """
    Normalize a hardware-index value (JSON array) into a list of ints.

    Args:
        hw_value: Hardware value; expected to be a list, or None.

    Returns:
        List of integer indices; empty when hw_value is None.

    Raises:
        ValueError: When hw_value is not a list, or when any element
            cannot be converted to an integer.
    """
    if hw_value is None:
        return []

    # Anything other than a list is a configuration error.
    if not isinstance(hw_value, list):
        log.warning("Unexpected type for hardware indices: %s", type(hw_value))
        raise ValueError(f"Hardware indices must be in array format, got: {type(hw_value)}")

    try:
        parsed = [int(item) for item in hw_value]
        log.debug("Parsed hardware indices from list format: %s", parsed)
    except (ValueError, TypeError) as exc:
        log.error("Failed to parse hardware indices from list format: %s", str(exc))
        raise ValueError(f"Invalid hardware indices format in list: {hw_value}")

    return parsed
|
||||||
|
|
||||||
|
def get_hypervisor_model(hypervisor: str) -> str:
    """Return the 'sosmodel' grain for the given hypervisor.

    Queries the master's grain cache for any minion id matching
    '<hypervisor>_*' and reads its sosmodel value.

    Raises:
        ValueError: If no cached grains or no sosmodel grain are found.
    """
    try:
        # Look up cached grains via the Salt runner client.
        cached = runner.cmd('cache.grains', [f'{hypervisor}_*', 'glob'])
        if not cached:
            raise ValueError(f"No grains found for hypervisor {hypervisor}")

        # Use the first minion id that matched the glob.
        minion_id = next(iter(cached))
        model = cached[minion_id].get('sosmodel')
        if not model:
            raise ValueError(f"No sosmodel grain found for hypervisor {hypervisor}")

        log.debug("Found model %s for hypervisor %s", model, hypervisor)
        return model

    except Exception as exc:
        log.error("Failed to get hypervisor model: %s", str(exc))
        raise
|
||||||
|
|
||||||
|
def load_hardware_defaults(model: str) -> dict:
    """Return the hardware configuration for ``model`` from defaults.yaml.

    Raises:
        ValueError: If defaults.yaml is structurally invalid or the
            model has no entry.
    """
    try:
        defaults = read_yaml_file(DEFAULTS_PATH)
        if not defaults or 'hypervisor' not in defaults:
            raise ValueError("Invalid defaults.yaml structure")
        hyper = defaults['hypervisor']
        if 'model' not in hyper:
            raise ValueError("No model configurations found in defaults.yaml")
        if model not in hyper['model']:
            raise ValueError(f"Model {model} not found in defaults.yaml")
        return hyper['model'][model]
    except Exception as exc:
        log.error("Failed to load hardware defaults: %s", str(exc))
        raise
|
||||||
|
|
||||||
|
def validate_hardware_request(model_config: dict, requested_hw: dict) -> Tuple[bool, Optional[dict]]:
    """
    Validate hardware request against model capabilities.

    Args:
        model_config: Model description; must contain a 'hardware' key
            with 'cpu' / 'memory' limits and per-type PCI index maps.
        requested_hw: Requested hardware settings for a VM.

    Returns:
        Tuple of (is_valid, error_details); error_details is None when
        everything validates.
    """
    problems = {}
    hardware = model_config['hardware']
    log.debug("Validating if requested hardware exists in model configuration")
    log.debug("Requested hardware: %s", requested_hw)
    log.debug("Model hardware configuration: %s", hardware)

    # CPU: requested core count must not exceed the model maximum.
    if 'cpu' in requested_hw:
        try:
            cores = int(requested_hw['cpu'])
            log.debug("Checking if %d CPU cores exist in model (maximum: %d)",
                      cores, hardware['cpu'])
            if cores > hardware['cpu']:
                problems['cpu'] = f"Requested {cores} CPU cores exceeds maximum {hardware['cpu']}"
        except ValueError:
            problems['cpu'] = "Invalid CPU value"

    # Memory: requested GB must not exceed the model maximum.
    if 'memory' in requested_hw:
        try:
            mem_gb = int(requested_hw['memory'])
            log.debug("Checking if %dGB memory exists in model (maximum: %dGB)",
                      mem_gb, hardware['memory'])
            if mem_gb > hardware['memory']:
                problems['memory'] = f"Requested {mem_gb}GB memory exceeds maximum {hardware['memory']}GB"
        except ValueError:
            problems['memory'] = "Invalid memory value"

    # PCI passthrough devices: every requested index must exist in the
    # model's map for that hardware type.
    for kind in ('disk', 'copper', 'sfp'):
        if not requested_hw.get(kind):
            continue
        try:
            wanted = parse_hardware_indices(requested_hw[kind])
            log.debug("Checking if %s indices %s exist in model", kind, wanted)

            if kind not in hardware:
                log.error("Hardware type %s not found in model config", kind)
                problems[kind] = f"No {kind} configuration found in model"
                continue

            available = set(int(key) for key in hardware[kind].keys())
            log.debug("Model has %s indices: %s", kind, available)

            missing = [idx for idx in wanted if idx not in available]
            if missing:
                log.error("%s indices %s do not exist in model", kind, missing)
                problems[kind] = f"Invalid {kind} indices: {missing}"
        except ValueError as exc:
            log.error("Invalid %s indices format: %s", kind, str(exc))
            problems[kind] = f"Invalid {kind} indices format"
        except KeyError:
            log.error("No %s configuration found in model", kind)
            problems[kind] = f"No {kind} configuration found in model"

    if problems:
        log.error("Hardware validation failed with errors: %s", problems)
    else:
        log.debug("Hardware validation successful")

    return (len(problems) == 0, problems if problems else None)
|
||||||
|
|
||||||
|
def check_hardware_availability(hypervisor_path: str, vm_name: str, requested_hw: dict, model_config: dict) -> Tuple[bool, Optional[dict]]:
    """
    Check if requested hardware is available on this hypervisor.

    Scans the tracking files of every other VM under hypervisor_path,
    accumulates their CPU/memory usage and the PCI device indices (disk,
    copper, sfp) they have claimed, then verifies that granting the request
    would neither exceed the model's capacity nor conflict with hardware
    already assigned to another VM.

    Args:
        hypervisor_path: Path to hypervisor directory containing VM tracking files
        vm_name: Name of requesting VM (its own tracking/status files are skipped)
        requested_hw: Hardware being requested (keys: cpu, memory, disk, copper, sfp)
        model_config: Model hardware configuration; capacities live under 'hardware'

    Returns:
        Tuple of (is_available, error_details). error_details is None when the
        hardware is available, otherwise a dict keyed by hardware type with
        human-readable conflict/capacity messages.
    """
    log.debug("Checking if requested hardware is currently in use by other VMs")
    log.debug("VM requesting hardware: %s", vm_name)
    log.debug("Hardware being requested: %s", requested_hw)

    errors = {}

    # Track total CPU/memory usage across all other VMs
    total_cpu = 0
    total_memory = 0

    # Track used unique resources and which VM is using them
    used_resources = {
        'disk': {},    # {index: vm_name}
        'copper': {},  # {index: vm_name}
        'sfp': {}      # {index: vm_name}
    }

    # Calculate current usage from existing VMs
    log.debug("Scanning existing VMs to check hardware usage")
    for vm_file in glob.glob(os.path.join(hypervisor_path, '*_*')):
        basename = os.path.basename(vm_file)
        # Skip if it's the same VM requesting hardware (also catches its
        # own '.status' companion files) or a VM in error state.
        # NOTE(review): startswith() would also skip a different VM whose
        # name merely begins with vm_name — confirm names cannot collide.
        if basename.startswith(vm_name):
            log.debug("Skipping file %s (same VM requesting hardware)", basename)
            continue
        if basename.endswith('.error'):
            log.debug("Skipping file %s (error state)", basename)
            continue

        vm_config = read_json_file(vm_file)
        # Files without a 'config' key (e.g. status files) carry no hardware info.
        if 'config' not in vm_config:
            log.debug("Skipping VM %s (no config found)", basename)
            continue

        config = vm_config['config']
        log.debug("Processing running VM %s", basename)

        # Add to CPU/memory totals
        vm_cpu = int(config.get('cpu', 0))
        vm_memory = int(config.get('memory', 0))
        total_cpu += vm_cpu
        total_memory += vm_memory
        log.debug("Found running VM %s using CPU: %d, Memory: %dGB", basename, vm_cpu, vm_memory)

        # Track unique resources claimed by this VM
        for hw_type in ['disk', 'copper', 'sfp']:
            if hw_type in config and config[hw_type]:
                try:
                    indices = parse_hardware_indices(config[hw_type])
                    for idx in indices:
                        used_resources[hw_type][idx] = basename.replace('_sensor', '')  # Store VM name without role
                    log.debug("VM %s is using %s indices: %s", basename, hw_type, indices)
                except ValueError as e:
                    # Best effort: an unparseable entry is logged but does not abort the scan.
                    log.error("Error parsing %s indices for VM %s: %s", hw_type, basename, str(e))

    log.debug("Total hardware currently in use - CPU: %d, Memory: %dGB", total_cpu, total_memory)
    log.debug("Hardware indices currently in use: %s", used_resources)

    # Check CPU capacity
    requested_cpu = int(requested_hw.get('cpu', 0))
    total_cpu_needed = total_cpu + requested_cpu
    log.debug("Checking CPU capacity - Currently in use: %d + Requested: %d = %d (Max: %d)",
              total_cpu, requested_cpu, total_cpu_needed, model_config['hardware']['cpu'])
    if total_cpu_needed > model_config['hardware']['cpu']:
        errors['cpu'] = f"Total CPU usage ({total_cpu_needed}) would exceed capacity ({model_config['hardware']['cpu']})"

    # Check memory capacity
    requested_memory = int(requested_hw.get('memory', 0))
    total_memory_needed = total_memory + requested_memory
    log.debug("Checking memory capacity - Currently in use: %d + Requested: %d = %d (Max: %d)",
              total_memory, requested_memory, total_memory_needed, model_config['hardware']['memory'])
    if total_memory_needed > model_config['hardware']['memory']:
        errors['memory'] = f"Total memory usage ({total_memory_needed}GB) would exceed capacity ({model_config['hardware']['memory']}GB)"

    # Check for hardware conflicts with indices already claimed by other VMs
    for hw_type in ['disk', 'copper', 'sfp']:
        if hw_type in requested_hw and requested_hw[hw_type]:
            try:
                requested_indices = parse_hardware_indices(requested_hw[hw_type])
                log.debug("Checking for %s conflicts - Requesting indices: %s, Currently in use: %s",
                          hw_type, requested_indices, used_resources[hw_type])
                conflicts = {}  # {index: vm_name}
                for idx in requested_indices:
                    if idx in used_resources[hw_type]:
                        conflicts[idx] = used_resources[hw_type][idx]

                if conflicts:
                    # Create one sentence per conflict
                    conflict_details = []
                    hw_name = hw_type.upper() if hw_type == 'sfp' else hw_type.capitalize()
                    for idx, vm in conflicts.items():
                        conflict_details.append(f"{hw_name} index {idx} in use by {vm}")

                    log.debug("Found conflicting %s indices: %s", hw_type, conflict_details)
                    errors[hw_type] = ". ".join(conflict_details) + "."
            except ValueError as e:
                log.error("Error parsing %s indices for conflict check: %s", hw_type, str(e))
                errors[hw_type] = f"Invalid {hw_type} indices format"

    if errors:
        # Fixed: previously logged at debug level, inconsistent with the
        # identical failure message in validate_hardware_request().
        log.error("Hardware validation failed with errors: %s", errors)
    else:
        log.debug("Hardware validation successful")

    return (len(errors) == 0, errors if errors else None)
|
||||||
|
|
||||||
|
def create_vm_tracking_file(hypervisor_path: str, vm_name: str, config: dict) -> None:
    """Create VM tracking file with initial state."""
    tracking_path = os.path.join(hypervisor_path, vm_name)
    log.debug("Creating VM tracking file at %s", tracking_path)
    try:
        # Ensure the parent directory exists and hand it to socore.
        parent_dir = os.path.dirname(tracking_path)
        os.makedirs(parent_dir, exist_ok=True)
        set_socore_ownership(parent_dir)

        # The initial state is just the requested configuration.
        write_json_file(tracking_path, {'config': config})
        log.debug("Successfully created VM tracking file with socore ownership")
    except Exception as e:
        log.error("Failed to create VM tracking file: %s", str(e))
        raise
|
||||||
|
|
||||||
|
def mark_vm_failed(vm_file: str, error_code: int, message: str) -> None:
    """Create error file with VM failure details."""
    try:
        original_config = {}
        if os.path.exists(vm_file):
            # Preserve the VM's original config, then drop the tracking
            # file since the error file below replaces it.
            original_config = read_json_file(vm_file).get('config', {})
            os.remove(vm_file)

        payload = {
            'config': original_config,
            'status': 'error',
            'timestamp': datetime.now().isoformat(),
            'error_details': {
                'code': error_code,
                'message': message
            }
        }
        write_json_file(f"{vm_file}.error", payload)
    except Exception as e:
        log.error("Failed to create error file: %s", str(e))
        raise
|
||||||
|
|
||||||
|
def mark_invalid_hardware(hypervisor_path: str, vm_name: str, config: dict, error_details: dict) -> None:
    """Create error file with hardware validation failure details."""
    error_path = os.path.join(hypervisor_path, f"{vm_name}.error")
    try:
        # Fold every per-hardware-type message into a single sentence,
        # in the order the validator recorded them.
        full_message = "Hardware validation failure: " + " ".join(error_details.values())

        payload = {
            'config': config,
            'status': 'error',
            'timestamp': datetime.now().isoformat(),
            'error_details': {
                'code': 3,  # Hardware validation failure code
                'message': full_message
            }
        }
        write_json_file(error_path, payload)
    except Exception as e:
        log.error("Failed to create invalid hardware file: %s", str(e))
        raise
|
||||||
|
|
||||||
|
def validate_vrt_license() -> bool:
    """Check if the license file exists and contains required values."""
    if not os.path.exists(LICENSE_PATH):
        log.error("License file not found at %s", LICENSE_PATH)
        return False

    try:
        with open(LICENSE_PATH, 'r') as fh:
            license_data = yaml.safe_load(fh)

        # An empty/unparseable file yields a falsy value from safe_load.
        if not license_data:
            log.error("Empty or invalid license file")
            return False

        if not license_data.get('license_id'):
            log.error("No license_id found in license file")
            return False

        # The 'vrt' feature flag gates hypervisor support.
        if 'vrt' not in license_data.get('features', []):
            log.error("Hypervisor nodes are a feature supported only for customers with a valid license.\n"
                      "Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com\n"
                      "for more information about purchasing a license to enable this feature.")
            return False

        log.debug("License validation successful")
        return True

    except Exception as e:
        log.error("Error reading license file: %s", str(e))
        return False
|
||||||
|
|
||||||
|
def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
    """
    Process a single VM creation request.

    This function handles the creation of a new VM, including hardware validation,
    resource allocation, and provisioning. All operations are protected by the
    engine-wide lock that is acquired at engine start.

    Args:
        hypervisor_path: Path to the hypervisor directory
        vm_config: Dictionary containing VM configuration (hostname, role,
            network settings, and optional cpu/memory/disk/copper/sfp requests)

    Raises:
        ValueError: If requested hardware indices cannot be parsed.
        KeyError: If a requested hardware index does not exist in the model.
        subprocess.CalledProcessError: If so-salt-cloud execution fails.
    """
    # Get the actual hypervisor name (last directory in path)
    hypervisor = os.path.basename(hypervisor_path)
    vm_name = f"{vm_config['hostname']}_{vm_config['role']}"

    try:
        # Get hypervisor model and capabilities
        model = get_hypervisor_model(hypervisor)
        model_config = load_hardware_defaults(model)

        # Send Processing status event (best effort; failure is logged only)
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', vm_name,
                '-H', hypervisor,
                '-s', 'Processing'
            ], check=True)
        except subprocess.CalledProcessError as e:
            # Fixed: previously called undefined name `logger` (NameError in
            # the handler) and mislabeled the event as "success".
            log.error("Failed to emit Processing status event: %s", str(e))

        # Initial hardware validation against model
        is_valid, errors = validate_hardware_request(model_config, vm_config)
        if not is_valid:
            mark_invalid_hardware(hypervisor_path, vm_name, vm_config, errors)
            return

        # Check hardware availability against other VMs on this hypervisor
        is_available, availability_errors = check_hardware_availability(
            hypervisor_path, vm_name, vm_config, model_config)
        if not is_available:
            mark_invalid_hardware(hypervisor_path, vm_name, vm_config, availability_errors)
            return

        # Create tracking file
        create_vm_tracking_file(hypervisor_path, vm_name, vm_config)

        # Build and execute so-salt-cloud command
        cmd = ['so-salt-cloud', '-p', f'sool9-{hypervisor}', vm_name]

        # Add network configuration
        if vm_config['network_mode'] == 'static4':
            cmd.extend(['--static4', '--ip4', vm_config['ip4'], '--gw4', vm_config['gw4']])
            if 'dns4' in vm_config:
                cmd.extend(['--dns4', vm_config['dns4']])
            if 'search4' in vm_config:
                cmd.extend(['--search4', vm_config['search4']])
        else:
            cmd.append('--dhcp4')

        # Add hardware configuration (memory is requested in GB, passed in MiB)
        if 'cpu' in vm_config:
            cmd.extend(['-c', str(vm_config['cpu'])])
        if 'memory' in vm_config:
            memory_mib = int(vm_config['memory']) * 1024
            cmd.extend(['-m', str(memory_mib)])

        # Add PCI devices, translating model indices to PCI IDs
        for hw_type in ['disk', 'copper', 'sfp']:
            if hw_type in vm_config and vm_config[hw_type]:
                try:
                    indices = parse_hardware_indices(vm_config[hw_type])
                    # Build the index -> PCI-ID map once per hardware type
                    # (was rebuilt on every loop iteration).
                    hw_config = {int(k): v for k, v in model_config['hardware'][hw_type].items()}
                    for idx in indices:
                        pci_id = hw_config[idx]
                        converted_pci_id = convert_pci_id(pci_id)
                        cmd.extend(['-P', converted_pci_id])
                except ValueError as e:
                    error_msg = f"Failed to parse {hw_type} indices: {str(e)}"
                    log.error(error_msg)
                    mark_vm_failed(os.path.join(hypervisor_path, vm_name), 3, error_msg)
                    raise ValueError(error_msg)
                except KeyError as e:
                    error_msg = f"Invalid {hw_type} index: {str(e)}"
                    log.error(error_msg)
                    mark_vm_failed(os.path.join(hypervisor_path, vm_name), 3, error_msg)
                    raise KeyError(error_msg)

        # Execute command
        subprocess.run(cmd, capture_output=True, text=True, check=True)

        # Rewrite the tracking file after provisioning.
        # NOTE(review): this reads and writes back the same data — presumably a
        # hook for future post-provision updates; confirm it is still needed.
        tracking_file = os.path.join(hypervisor_path, vm_name)
        data = read_json_file(tracking_file)
        write_json_file(tracking_file, data)

    except subprocess.CalledProcessError as e:
        error_msg = f"so-salt-cloud execution failed (code {e.returncode})"
        if e.stderr:
            error_msg = f"{error_msg}: {e.stderr}"
        log.error(error_msg)
        mark_vm_failed(os.path.join(hypervisor_path, vm_name), 4, error_msg)
        raise
    except Exception as e:
        error_msg = f"VM creation failed: {str(e)}"
        log.error(error_msg)
        # Only write a synthetic failure marker when no tracking file exists,
        # otherwise mark_vm_failed was already invoked above.
        if not os.path.exists(os.path.join(hypervisor_path, vm_name)):
            mark_vm_failed(os.path.join(hypervisor_path, f"{vm_name}_failed"), 4, error_msg)
        raise
|
||||||
|
|
||||||
|
def cleanup_destroyed_vm_status_files(hypervisor_path: str) -> None:
    """
    Clean up status files for destroyed VMs that are older than the retention period.

    Args:
        hypervisor_path: Path to the hypervisor directory
    """
    try:
        log.debug(f"Using destroyed VM retention period of {DESTROYED_VM_RETENTION_HOURS} hours")

        # Anything with a timestamp earlier than this is expired.
        cutoff_time = datetime.now() - timedelta(hours=DESTROYED_VM_RETENTION_HOURS)

        status_files = glob.glob(os.path.join(hypervisor_path, '*_*.status'))
        log.debug(f"Found {len(status_files)} status files to check for expired destroyed VMs")

        for status_file in status_files:
            try:
                status_data = read_json_file(status_file)

                # Only destroyed-instance records with a timestamp are candidates.
                if status_data.get('status') != 'Destroyed Instance':
                    continue
                timestamp_str = status_data.get('timestamp', '')
                if not timestamp_str:
                    continue

                timestamp = datetime.fromisoformat(timestamp_str)
                vm_name = os.path.basename(status_file).replace('.status', '')
                age_hours = (datetime.now() - timestamp).total_seconds() / 3600

                if timestamp < cutoff_time:
                    log.info(f"Removing expired status file for VM {vm_name} (age: {age_hours:.1f} hours > retention: {DESTROYED_VM_RETENTION_HOURS} hours)")
                    os.remove(status_file)
                else:
                    log.debug(f"Status file for VM {vm_name} (age: {age_hours:.1f} hours < retention: {DESTROYED_VM_RETENTION_HOURS} hours)")
            except Exception as e:
                # One bad status file must not stop the sweep.
                log.error(f"Error processing status file {status_file}: {e}")

    except Exception as e:
        log.error(f"Failed to clean up destroyed VM status files: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def process_vm_deletion(hypervisor_path: str, vm_name: str) -> None:
    """
    Process a single VM deletion request.

    This function handles the deletion of an existing VM. All operations are protected
    by the engine-wide lock that is acquired at engine start.

    If so-salt-cloud fails during VM deletion, the function will restore the VM
    configuration back to the VMs file to maintain consistency.

    Args:
        hypervisor_path: Path to the hypervisor directory
        vm_name: Name of the VM to delete

    Raises:
        subprocess.CalledProcessError: If the so-salt-cloud deletion command fails
            (re-raised after attempting to restore the VM config).
        Exception: Any other error encountered while processing the deletion.
    """
    vm_config = None
    # Hypervisor name is the last component of the hypervisor directory path.
    hypervisor = os.path.basename(hypervisor_path)

    try:
        # Read VM configuration from tracking file before attempting deletion,
        # so it can be restored to the VMs file if the deletion fails below.
        vm_file = os.path.join(hypervisor_path, vm_name)
        if os.path.exists(vm_file):
            try:
                vm_data = read_json_file(vm_file)
                vm_config = vm_data.get('config') if isinstance(vm_data, dict) else None
                if vm_config:
                    log.debug("Read VM config for %s before deletion", vm_name)
                else:
                    log.warning("No config found in tracking file for %s", vm_name)
            except Exception as e:
                # Best effort: deletion proceeds even without a config,
                # we just lose the ability to restore it on failure.
                log.warning("Failed to read VM config from tracking file %s: %s", vm_file, str(e))

        # Attempt VM deletion with so-salt-cloud
        # NOTE(review): '-yd' presumably means "answer yes, destroy" —
        # confirm against so-salt-cloud usage.
        cmd = ['so-salt-cloud', '-p', f'sool9-{hypervisor}', vm_name, '-yd']

        log.info("Executing: %s", ' '.join(cmd))
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)

        # Log command output
        if result.stdout:
            log.debug("Command stdout: %s", result.stdout)
        if result.stderr:
            log.warning("Command stderr: %s", result.stderr)

        # Remove VM tracking file on successful deletion
        if os.path.exists(vm_file):
            os.remove(vm_file)
            log.info("Successfully removed VM tracking file for %s", vm_name)

    except subprocess.CalledProcessError as e:
        error_msg = f"so-salt-cloud deletion failed (code {e.returncode}): {e.stderr}"
        log.error("%s", error_msg)
        log.error("Ensure all hypervisors are online. salt-cloud will fail to destroy VMs if any hypervisors are offline.")

        # Attempt to restore VM configuration to VMs file if we have the config
        if vm_config:
            try:
                _restore_vm_to_vms_file(hypervisor_path, hypervisor, vm_config)
            except Exception as restore_error:
                # Restoration is best-effort; the original deletion failure
                # is still re-raised below.
                log.error("Failed to restore VM config after deletion failure: %s", str(restore_error))

        raise
    except Exception as e:
        log.error("Error processing VM deletion: %s", str(e))
        raise
|
||||||
|
|
||||||
|
|
||||||
|
def _restore_vm_to_vms_file(hypervisor_path: str, hypervisor: str, vm_config: dict) -> None:
    """
    Restore VM configuration to the VMs file after failed deletion.

    Args:
        hypervisor_path: Path to the hypervisor directory
        hypervisor: Name of the hypervisor
        vm_config: VM configuration to restore
    """
    try:
        # The VMs file lives next to (not inside) the hypervisor directory.
        vms_file = os.path.join(os.path.dirname(hypervisor_path), f"{hypervisor}VMs")

        # Load the current VM list, falling back to an empty list on any problem.
        current_vms = []
        if os.path.exists(vms_file):
            try:
                loaded = read_json_file(vms_file)
                if isinstance(loaded, list):
                    current_vms = loaded
                else:
                    log.warning("VMs file contains non-array data, initializing as empty array")
            except Exception as e:
                log.warning("Failed to read VMs file %s, initializing as empty: %s", vms_file, str(e))

        # Skip restoration when the hostname is already present (avoid duplicates).
        vm_hostname = vm_config.get('hostname')
        if vm_hostname and any(
                isinstance(entry, dict) and entry.get('hostname') == vm_hostname
                for entry in current_vms):
            log.info("VM with hostname %s already exists in VMs file, skipping restoration", vm_hostname)
            return

        # Append the config and persist the updated list.
        current_vms.append(vm_config)
        write_json_file(vms_file, current_vms)
        log.info("Successfully restored VM config for %s to VMs file %s", vm_hostname or 'unknown', vms_file)

    except Exception as e:
        log.error("Failed to restore VM configuration: %s", str(e))
        raise
|
||||||
|
|
||||||
|
def process_hypervisor(hypervisor_path: str) -> None:
    """
    Process VM configurations for a single hypervisor.

    This function handles the processing of VM configurations for a hypervisor,
    including creation of new VMs and deletion of removed VMs. All operations
    are protected by the engine-wide lock that is acquired at engine start.

    The function performs the following steps:
    1. Reads VMs configuration from <hypervisorHostname>VMs file
    2. Identifies existing VMs
    3. Processes new VM creation requests
    4. Handles VM deletions for removed configurations
    5. Cleans up expired status files for destroyed VMs

    Args:
        hypervisor_path: Path to the hypervisor directory

    Raises:
        Exception: Re-raised after logging if any step fails.
    """
    try:
        # Get hypervisor name from path
        hypervisor = os.path.basename(hypervisor_path)

        # The VMs file lives next to (not inside) the hypervisor directory.
        vms_file = os.path.join(os.path.dirname(hypervisor_path), f"{hypervisor}VMs")
        if not os.path.exists(vms_file):
            log.debug("No VMs file found at %s", vms_file)

            # Even if no VMs file exists, we should still clean up any expired status files
            cleanup_destroyed_vm_status_files(hypervisor_path)
            return

        nodes_config = read_json_file(vms_file)
        # NOTE(review): an empty VMs file falls through here, so every
        # existing VM ends up in vms_to_delete below — confirm intended.
        if not nodes_config:
            log.debug("Empty VMs configuration in %s", vms_file)

        # Collect VMs that already have tracking files on disk.
        existing_vms = set()
        for file_path in glob.glob(os.path.join(hypervisor_path, '*_*')):
            basename = os.path.basename(file_path)
            # Skip error and status files
            if not basename.endswith('.error') and not basename.endswith('.status'):
                existing_vms.add(basename)

        # Process new VMs: any configured "<hostname>_<role>" without a tracking file.
        configured_vms = set()
        for vm_config in nodes_config:
            if 'hostname' not in vm_config or 'role' not in vm_config:
                log.error("Invalid VM configuration: missing hostname or role")
                continue

            vm_name = f"{vm_config['hostname']}_{vm_config['role']}"
            configured_vms.add(vm_name)

            if vm_name not in existing_vms:
                # process_vm_creation handles its own locking
                process_vm_creation(hypervisor_path, vm_config)

        # Process VM deletions: tracking files whose VM is no longer configured.
        vms_to_delete = existing_vms - configured_vms
        log.debug(f"Existing VMs: {existing_vms}")
        log.debug(f"Configured VMs: {configured_vms}")
        log.debug(f"VMs to delete: {vms_to_delete}")
        for vm_name in vms_to_delete:
            log.info(f"Initiating deletion process for VM: {vm_name}")
            process_vm_deletion(hypervisor_path, vm_name)

        # Clean up expired status files for destroyed VMs
        cleanup_destroyed_vm_status_files(hypervisor_path)

    except Exception as e:
        log.error("Failed to process hypervisor %s: %s", hypervisor_path, str(e))
        raise
|
||||||
|
|
||||||
|
def start(interval: int = DEFAULT_INTERVAL,
          base_path: str = DEFAULT_BASE_PATH) -> None:
    """
    Engine entry point: process virtual node configurations once.

    A single engine-wide lock ensures only one instance of the virtual node
    manager runs at a time; it is acquired here and released only after a
    clean run.

    Args:
        interval: Time in seconds between engine runs (managed by salt-master)
        base_path: Base path containing hypervisor configurations

    Notes:
        - The lock is deliberately left held if processing raises, so the
          admin must restart the service to clear it.
        - Lock problems are reported at error level.
    """
    log.debug("Starting virtual node manager engine")

    # A valid 'vrt' license is required for hypervisor support.
    if not validate_vrt_license():
        return

    # Refuse to run concurrently with another instance.
    if not engine_lock.acquire(blocking=False):
        log.error("Another virtual node manager is already running")
        return

    log.debug("Virtual node manager acquired lock")

    try:
        # Walk every hypervisor directory under base_path.
        for hypervisor_dir in glob.glob(os.path.join(base_path, '*')):
            if not os.path.isdir(hypervisor_dir):
                continue
            process_hypervisor(hypervisor_dir)

        # Clean shutdown - release lock
        log.debug("Virtual node manager releasing lock")
        engine_lock.release()
        log.debug("Virtual node manager completed successfully")

    except Exception as e:
        # Lock intentionally not released here (see Notes).
        log.error("Error in virtual node manager: %s", str(e))
        return
|
||||||
330
salt/salt/engines/master/virtual_power_manager.py
Normal file
330
salt/salt/engines/master/virtual_power_manager.py
Normal file
@@ -0,0 +1,330 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
"""
|
||||||
|
Salt Engine for Virtual Machine Power Management
|
||||||
|
|
||||||
|
This engine manages power control actions for virtual machines in Security Onion's
|
||||||
|
virtualization infrastructure. It monitors VM configurations for power control requests
|
||||||
|
and executes the appropriate virt module actions.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
engines:
|
||||||
|
- virtual_power_manager:
|
||||||
|
interval: 60
|
||||||
|
base_path: /opt/so/saltstack/local/salt/hypervisor/hosts
|
||||||
|
|
||||||
|
Options:
|
||||||
|
interval: Time in seconds between engine runs (managed by salt-master, default: 60)
|
||||||
|
base_path: Base directory containing hypervisor configurations (default: /opt/so/saltstack/local/salt/hypervisor/hosts)
|
||||||
|
|
||||||
|
Configuration Files:
|
||||||
|
<hypervisorHostname>VMs: JSON file containing VM configurations
|
||||||
|
- Located at <base_path>/<hypervisorHostname>VMs
|
||||||
|
- Contains array of VM configurations
|
||||||
|
- Power control requests are specified with the "powercontrol" key
|
||||||
|
- Valid values for "powercontrol": "Reboot", "Reset", "Shutdown", "Start", "Stop"
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. Basic Configuration:
|
||||||
|
engines:
|
||||||
|
- virtual_power_manager: {}
|
||||||
|
|
||||||
|
Uses default settings to process power control requests every 60 seconds.
|
||||||
|
|
||||||
|
2. Custom Interval:
|
||||||
|
engines:
|
||||||
|
- virtual_power_manager:
|
||||||
|
interval: 120
|
||||||
|
|
||||||
|
Processes power control requests every 120 seconds.
|
||||||
|
|
||||||
|
Power Control Actions:
|
||||||
|
- Reboot: Gracefully reboot the VM (virt.reboot)
|
||||||
|
- Reset: Force reset the VM (virt.reset)
|
||||||
|
- Shutdown: Gracefully shut down the VM (virt.shutdown)
|
||||||
|
- Start: Start the VM (virt.start)
|
||||||
|
- Stop: Force stop the VM (virt.stop)
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- File locking is used to prevent race conditions when multiple processes access the VMs file
|
||||||
|
- The "powercontrol" key is removed from the VM configuration after successful execution
|
||||||
|
- Comprehensive logging for troubleshooting
|
||||||
|
- No continuous loop (salt-master handles scheduling)
|
||||||
|
- File locking is only applied when a powercontrol key is detected, not on every run
|
||||||
|
|
||||||
|
Description:
|
||||||
|
The engine operates in the following phases:
|
||||||
|
|
||||||
|
1. Configuration Processing
|
||||||
|
- Reads VMs file for each hypervisor without locking
|
||||||
|
- Identifies VMs with "powercontrol" key
|
||||||
|
- If powercontrol key is found, acquires lock and reads file again
|
||||||
|
|
||||||
|
2. Power Control Execution
|
||||||
|
- Maps "powercontrol" value to virt module function
|
||||||
|
- Executes appropriate virt module command
|
||||||
|
- Removes "powercontrol" key after successful execution
|
||||||
|
|
||||||
|
3. File Locking
|
||||||
|
- Acquires lock only when a powercontrol key is detected
|
||||||
|
- Releases lock after modifications
|
||||||
|
- Handles lock acquisition failures
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
Log files are written to /opt/so/log/salt/master
|
||||||
|
Comprehensive logging includes:
|
||||||
|
- Power control action details
|
||||||
|
- Command execution results
|
||||||
|
- Error conditions with full context
|
||||||
|
- File locking operations
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import fcntl
|
||||||
|
import salt.client
|
||||||
|
from typing import Dict, List, Optional, Any, Tuple
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
log.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
# Constants
|
||||||
|
DEFAULT_INTERVAL = 60
|
||||||
|
DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts'
|
||||||
|
VALID_POWER_ACTIONS = {'Reboot', 'Reset', 'Shutdown', 'Start', 'Stop'}
|
||||||
|
|
||||||
|
class FileLock:
    """
    Context manager providing an exclusive advisory lock around a file.

    A sidecar lock file ("<file_path>.lock") is opened and an exclusive
    fcntl.flock() is taken on it when entering the context; the lock is
    released when the context exits.

    Note: the lock file is intentionally left in place after release.
    Unlinking the path while another waiter still holds the old inode
    open would let a third process recreate the path and lock a *new*
    inode, so two processes could each "hold" the lock simultaneously.

    Example:
        with FileLock(file_path):
            # Read and modify file
            # Lock is automatically released when exiting the context
    """

    def __init__(self, file_path: str):
        # Path of the file being protected (used for logging only).
        self.file_path = file_path
        # Sidecar file that is actually locked with flock().
        self.lock_path = f"{file_path}.lock"
        # Open handle on the sidecar file while the lock is held.
        self.lock_file = None

    def __enter__(self):
        try:
            # Open (or create) the lock file.
            self.lock_file = open(self.lock_path, 'w')

            # Block until an exclusive lock is acquired.
            fcntl.flock(self.lock_file, fcntl.LOCK_EX)
            log.debug("Acquired lock on %s", self.file_path)

            return self

        except Exception as e:
            log.error("Failed to acquire lock on %s: %s", self.file_path, str(e))
            # Do not leak the descriptor if flock() failed after open().
            if self.lock_file:
                self.lock_file.close()
                self.lock_file = None
            raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # Release lock and close the handle.
            if self.lock_file:
                fcntl.flock(self.lock_file, fcntl.LOCK_UN)
                self.lock_file.close()
                self.lock_file = None
                log.debug("Released lock on %s", self.file_path)

            # BUGFIX: previously the lock file was unlinked here. Removing
            # the path while another process may already have it open races
            # with concurrent lockers (they can end up locking different
            # inodes of the same path). The empty sidecar file is now left
            # behind on purpose.

        except Exception as e:
            log.error("Error releasing lock on %s: %s", self.file_path, str(e))
|
||||||
|
def read_json_file(file_path: str) -> Any:
    """
    Load and deserialize the JSON document stored at ``file_path``.

    An empty (or whitespace-only) file yields an empty list instead of
    raising a JSON decode error.
    """
    try:
        with open(file_path, 'r') as handle:
            raw = handle.read().strip()
        return json.loads(raw) if raw else []
    except Exception as e:
        log.error("Failed to read JSON file %s: %s", file_path, str(e))
        raise
def write_json_file(file_path: str, data: Any) -> None:
    """Serialize ``data`` as pretty-printed (2-space) JSON at ``file_path``."""
    try:
        with open(file_path, 'w') as handle:
            handle.write(json.dumps(data, indent=2))
    except Exception as e:
        log.error("Failed to write JSON file %s: %s", file_path, str(e))
        raise
def has_power_control_requests(nodes_config: List[Dict]) -> bool:
    """
    Report whether any VM entry carries a pending power-control request.

    Args:
        nodes_config: List of VM configuration dictionaries

    Returns:
        True when at least one entry contains a 'powercontrol' key,
        False otherwise
    """
    for entry in nodes_config:
        if 'powercontrol' in entry:
            return True
    return False
def process_power_control(hypervisor: str, vm_config: dict) -> bool:
    """
    Execute a single VM power-control request via the Salt virt module.

    Args:
        hypervisor: Name of the hypervisor hosting the VM
        vm_config: VM configuration dictionary; must contain 'hostname',
            'role' and 'powercontrol' keys

    Returns:
        True if the power control action was successful, False otherwise
    """
    try:
        # VM domain names are "<hostname>_<role>".
        vm_name = f"{vm_config['hostname']}_{vm_config['role']}"
        action = vm_config['powercontrol']

        # Reject anything outside the supported set of actions.
        if action not in VALID_POWER_ACTIONS:
            log.error("Invalid power control action: %s", action)
            return False

        # The virt module exposes each action as a lower-case function name.
        log.info("Executing %s on VM %s", action, vm_name)
        result = salt.client.LocalClient().cmd(
            f"{hypervisor}_*",
            f"virt.{action.lower()}",
            [vm_name],
            expr_form="glob"
        )

        # A truthy return from any targeted minion counts as success.
        if result and any(result.values()):
            log.info("Successfully executed %s on VM %s", action, vm_name)
            return True

        log.error("Failed to execute %s on VM %s: %s", action, vm_name, result)
        return False

    except Exception as e:
        log.error("Error processing power control for VM %s: %s", vm_config.get('hostname', 'unknown'), str(e))
        return False
def process_hypervisor_power_requests(hypervisor_path: str) -> None:
    """
    Handle all pending power-control requests for one hypervisor.

    The <hypervisor>VMs file (a sibling of the hypervisor directory) is
    first read without locking; only when a 'powercontrol' key is present
    is the file locked, re-read, acted on, and rewritten.

    Args:
        hypervisor_path: Path to the hypervisor directory
    """
    try:
        # The directory name is the hypervisor name.
        hypervisor = os.path.basename(hypervisor_path)
        vms_file = os.path.join(os.path.dirname(hypervisor_path), f"{hypervisor}VMs")

        if not os.path.exists(vms_file):
            log.debug("No VMs file found at %s", vms_file)
            return

        # Cheap lock-free pre-check: bail out unless work is pending.
        nodes_config = read_json_file(vms_file)
        if not nodes_config:
            log.debug("Empty VMs configuration in %s", vms_file)
            return
        if not has_power_control_requests(nodes_config):
            log.debug("No power control requests found in %s", vms_file)
            return

        # Work is pending: take the lock and re-read for fresh data.
        with FileLock(vms_file):
            nodes_config = read_json_file(vms_file)
            if not nodes_config:
                log.debug("Empty VMs configuration in %s (after lock)", vms_file)
                return

            changes_made = False
            for vm_config in nodes_config:
                if 'powercontrol' not in vm_config:
                    continue

                log.info("Found power control request for VM %s_%s: %s",
                         vm_config.get('hostname', 'unknown'),
                         vm_config.get('role', 'unknown'),
                         vm_config['powercontrol'])

                if process_power_control(hypervisor, vm_config):
                    # Drop the key so the request is not replayed next run.
                    log.info("Power control action successful, removing powercontrol key")
                    del vm_config['powercontrol']
                    changes_made = True

            # Persist only if at least one request was completed.
            if changes_made:
                log.info("Writing updated VM configuration to %s", vms_file)
                write_json_file(vms_file, nodes_config)

    except Exception as e:
        log.error("Failed to process hypervisor %s: %s", hypervisor_path, str(e))
        raise
def start(interval: int = DEFAULT_INTERVAL,
          base_path: str = DEFAULT_BASE_PATH) -> None:
    """
    Engine entry point: process VM power-control requests once.

    Scans every hypervisor directory under ``base_path`` and handles any
    'powercontrol' keys found in the corresponding <hypervisor>VMs files.
    No loop is performed here; scheduling is handled by the salt-master.

    Args:
        interval: Time in seconds between engine runs (managed by salt-master)
        base_path: Base path containing hypervisor configurations
    """
    log.debug("Starting virtual power manager engine")

    try:
        # Each directory under base_path represents one hypervisor.
        for candidate in glob.glob(os.path.join(base_path, '*')):
            if os.path.isdir(candidate):
                process_hypervisor_power_requests(candidate)

        log.debug("Virtual power manager completed successfully")

    except Exception as e:
        log.error("Error in virtual power manager: %s", str(e))
@@ -1,8 +0,0 @@
|
|||||||
mine_interval: 25
|
|
||||||
mine_functions:
|
|
||||||
network.ip_addrs:
|
|
||||||
- interface: {{ pillar.host.mainint }}
|
|
||||||
{%- if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] %}
|
|
||||||
x509.get_pem_entries:
|
|
||||||
- glob_path: '/etc/pki/ca.crt'
|
|
||||||
{% endif -%}
|
|
||||||
7
salt/salt/files/vrt_engine.conf
Normal file
7
salt/salt/files/vrt_engine.conf
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
engines:
|
||||||
|
- virtual_node_manager:
|
||||||
|
interval: 10
|
||||||
|
base_path: /opt/so/saltstack/local/salt/hypervisor/hosts
|
||||||
|
- virtual_power_manager:
|
||||||
|
interval: 10
|
||||||
|
base_path: /opt/so/saltstack/local/salt/hypervisor/hosts
|
||||||
@@ -5,6 +5,7 @@ saltpymodules:
|
|||||||
- python3-docker
|
- python3-docker
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
# distribute to minions for salt upgrades
|
||||||
salt_bootstrap:
|
salt_bootstrap:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /usr/sbin/bootstrap-salt.sh
|
- name: /usr/sbin/bootstrap-salt.sh
|
||||||
|
|||||||
@@ -3,17 +3,24 @@
|
|||||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
Elastic License 2.0. #}
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% set role = salt['grains.get']('role', '') %}
|
||||||
|
{% if role in ['so-hypervisor','so-managerhype'] and salt['network.ip_addrs']('br0')|length > 0 %}
|
||||||
|
{% set interface = 'br0' %}
|
||||||
|
{% else %}
|
||||||
|
{% set interface = pillar.host.mainint %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{% import_yaml 'salt/minion.defaults.yaml' as saltminion %}
|
{% import_yaml 'salt/minion.defaults.yaml' as saltminion %}
|
||||||
{% set SALTVERSION = saltminion.salt.minion.version | string %}
|
{% set SALTVERSION = saltminion.salt.minion.version | string %}
|
||||||
{% set INSTALLEDSALTVERSION = grains.saltversion | string %}
|
{% set INSTALLEDSALTVERSION = grains.saltversion | string %}
|
||||||
|
|
||||||
{% if grains.os_family == 'Debian' %}
|
{% if grains.os_family == 'Debian' %}
|
||||||
{% set SPLITCHAR = '+' %}
|
{% set SPLITCHAR = '+' %}
|
||||||
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
|
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion', 'salt-cloud'] %}
|
||||||
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
|
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
|
||||||
{% else %}
|
{% else %}
|
||||||
{% set SPLITCHAR = '-' %}
|
{% set SPLITCHAR = '-' %}
|
||||||
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
|
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion', 'salt-cloud'] %}
|
||||||
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
|
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,27 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
{% if sls in allowed_states %}
|
{% if sls in allowed_states %}
|
||||||
|
|
||||||
include:
|
include:
|
||||||
- salt.minion
|
- salt.minion
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
- salt.cloud
|
||||||
|
- salt.cloud.reactor_config_hypervisor
|
||||||
|
|
||||||
|
sync_runners:
|
||||||
|
salt.runner:
|
||||||
|
- name: saltutil.sync_runners
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
hold_salt_master_package:
|
hold_salt_master_package:
|
||||||
module.run:
|
module.run:
|
||||||
@@ -29,11 +48,41 @@ pillarWatch_engine:
|
|||||||
- name: /etc/salt/engines/pillarWatch.py
|
- name: /etc/salt/engines/pillarWatch.py
|
||||||
- source: salt://salt/engines/master/pillarWatch.py
|
- source: salt://salt/engines/master/pillarWatch.py
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
vrt_engine_config:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/master.d/vrt_engine.conf
|
||||||
|
- source: salt://salt/files/vrt_engine.conf
|
||||||
|
- watch_in:
|
||||||
|
- service: salt_master_service
|
||||||
|
|
||||||
|
virtual_node_manager_engine:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/engines/virtual_node_manager.py
|
||||||
|
- source: salt://salt/engines/master/virtual_node_manager.py
|
||||||
|
- watch_in:
|
||||||
|
- service: salt_master_service
|
||||||
|
|
||||||
|
virtual_power_manager_engine:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/engines/virtual_power_manager.py
|
||||||
|
- source: salt://salt/engines/master/virtual_power_manager.py
|
||||||
|
- watch_in:
|
||||||
|
- service: salt_master_service
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
engines_config:
|
engines_config:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /etc/salt/master.d/engines.conf
|
- name: /etc/salt/master.d/engines.conf
|
||||||
- source: salt://salt/files/engines.conf
|
- source: salt://salt/files/engines.conf
|
||||||
|
|
||||||
|
# update the bootstrap script when used for salt-cloud
|
||||||
|
salt_bootstrap_cloud:
|
||||||
|
file.managed:
|
||||||
|
- name: /opt/saltstack/salt/lib/python3.10/site-packages/salt/cloud/deploy/bootstrap-salt.sh
|
||||||
|
- source: salt://salt/scripts/bootstrap-salt.sh
|
||||||
|
- show_changes: False
|
||||||
|
|
||||||
salt_master_service:
|
salt_master_service:
|
||||||
service.running:
|
service.running:
|
||||||
- name: salt-master
|
- name: salt-master
|
||||||
|
|||||||
@@ -6,8 +6,30 @@
|
|||||||
# this state was seperated from salt.minion state since it is called during setup
|
# this state was seperated from salt.minion state since it is called during setup
|
||||||
# GLOBALS are imported in the salt.minion state and that is not available at that point in setup
|
# GLOBALS are imported in the salt.minion state and that is not available at that point in setup
|
||||||
# this state is included in the salt.minion state
|
# this state is included in the salt.minion state
|
||||||
|
|
||||||
|
{% from 'salt/map.jinja' import interface %}
|
||||||
|
{% from 'salt/map.jinja' import role %}
|
||||||
|
|
||||||
mine_functions:
|
mine_functions:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /etc/salt/minion.d/mine_functions.conf
|
- name: /etc/salt/minion.d/mine_functions.conf
|
||||||
- source: salt://salt/etc/minion.d/mine_functions.conf.jinja
|
- contents: |
|
||||||
- template: jinja
|
mine_interval: 25
|
||||||
|
mine_functions:
|
||||||
|
network.ip_addrs:
|
||||||
|
- interface: {{ interface }}
|
||||||
|
{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
|
||||||
|
x509.get_pem_entries:
|
||||||
|
- glob_path: '/etc/pki/ca.crt'
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
mine_update_mine_functions:
|
||||||
|
module.run:
|
||||||
|
- mine.update: []
|
||||||
|
- onchanges:
|
||||||
|
- file: mine_functions
|
||||||
|
- onlyif:
|
||||||
|
- systemctl is-active --quiet salt-minion
|
||||||
|
{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
|
||||||
|
- systemctl is-active --quiet salt-master
|
||||||
|
{% endif %}
|
||||||
|
|||||||
@@ -48,12 +48,14 @@ install_salt_minion:
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
||||||
|
# only hold the package if it is already installed
|
||||||
hold_salt_packages:
|
hold_salt_packages:
|
||||||
pkg.held:
|
pkg.held:
|
||||||
- pkgs:
|
- pkgs:
|
||||||
{% for package in SALTPACKAGES %}
|
{% for package in SALTPACKAGES %}
|
||||||
|
{% if salt['pkg.version'](package) %}
|
||||||
- {{ package }}: {{SALTVERSION}}-0.*
|
- {{ package }}: {{SALTVERSION}}-0.*
|
||||||
|
{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
|
|
||||||
remove_error_log_level_logfile:
|
remove_error_log_level_logfile:
|
||||||
|
|||||||
Binary file not shown.
80
salt/salt/scripts/fixLibvirt.py
Normal file
80
salt/salt/scripts/fixLibvirt.py
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
|
||||||
|
#!/usr/bin/env python3

# Resolves https://github.com/saltstack/salt/issues/64962
# Salt 3006.2+ has an issue with libvirt.
# Use pip to install lief, then run this script to resolve.

import datetime
import grp
import os
import pathlib
import pwd
import shutil
##
import dbus  # dnf -y install python3-dbus
##
import lief  # https://pypi.org/project/lief/

# https://github.com/saltstack/salt/issues/64962

salt_root = pathlib.Path('/opt/saltstack')
src_lib = pathlib.Path('/lib64/libldap.so.2')
dst_lib = salt_root.joinpath('salt', 'lib', 'libldap.so.2')

# Ownership applied to the patched library.
uname = 'root'
gname = 'root'

lib = lief.parse(str(src_lib))
# BUGFIX: next() without a default raises StopIteration when EVP_md2 is
# absent (e.g. the library was already fixed), so the `if sym:` guard below
# could never see a falsy value. Supply None as the default instead.
sym = next((i for i in lib.imported_symbols if i.name == 'EVP_md2'), None)
if sym:
    # Get the Salt services from DBus so active ones can be stopped before
    # the library swap and restarted afterwards.
    sysbus = dbus.SystemBus()
    sysd = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    mgr = dbus.Interface(sysd, 'org.freedesktop.systemd1.Manager')
    svcs = []
    for i in mgr.ListUnits():
        # first element is unit name.
        if not str(i[0]).startswith('salt-'):
            continue
        svc = sysbus.get_object('org.freedesktop.systemd1', object_path = mgr.GetUnit(str(i[0])))
        props = dbus.Interface(svc, dbus_interface = 'org.freedesktop.DBus.Properties')
        state = props.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if str(state) == 'active':
            svcs.append(i[0])
    # Get the user/group
    u = pwd.getpwnam(uname)
    g = grp.getgrnam(gname)
    # Modify
    print('Modifications necessary.')
    if svcs:
        # Stop the services first.
        for sn in svcs:
            mgr.StopUnit(sn, 'replace')
    if dst_lib.exists():
        # 3.10 deprecated .utcnow().
        #dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.now(datetime.UTC).timestamp()))
        dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.utcnow().timestamp()))
        os.rename(dst_lib, dst_lib_bak)
        print('Destination file {0} exists; backed up to {1}.'.format(dst_lib, dst_lib_bak))
    # Strip the offending symbol and write the patched library into place,
    # preserving the source library's mode and applying root ownership.
    lib.remove_dynamic_symbol(sym)
    lib.write(str(dst_lib))
    os.chown(dst_lib, u.pw_uid, g.gr_gid)
    os.chmod(dst_lib, src_lib.stat().st_mode)
    # Before we restart services, we also want to remove any python caches.
    for root, dirs, files in os.walk(salt_root):
        for f in files:
            if f.lower().endswith('.pyc'):
                fpath = os.path.join(root, f)
                os.remove(fpath)
                print('Removed file {0}'.format(fpath))
        if '__pycache__' in dirs:
            dpath = os.path.join(root, '__pycache__')
            shutil.rmtree(dpath)
            print('Removed directory {0}'.format(dpath))
    # And then start the units that were started before.
    if svcs:
        for sn in svcs:
            mgr.RestartUnit(sn, 'replace')

print('Done.')
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
{% from 'salt/map.jinja' import interface -%}
|
||||||
|
|
||||||
[Unit]
|
[Unit]
|
||||||
Description=The Salt Minion
|
Description=The Salt Minion
|
||||||
Documentation=man:salt-minion(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltproject.io/en/latest/contents.html
|
Documentation=man:salt-minion(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltproject.io/en/latest/contents.html
|
||||||
@@ -8,7 +10,7 @@ KillMode=process
|
|||||||
Type=notify
|
Type=notify
|
||||||
NotifyAccess=all
|
NotifyAccess=all
|
||||||
LimitNOFILE=8192
|
LimitNOFILE=8192
|
||||||
ExecStartPre=/bin/bash -c 'until /sbin/ip -4 addr show dev {{ salt["pillar.get"]("host:mainint") }} | grep -q "inet "; do sleep 1; done'
|
ExecStartPre=/bin/bash -c 'until /sbin/ip -4 addr show dev {{ interface }} | grep -q "inet "; do sleep 1; done'
|
||||||
ExecStart=/usr/bin/salt-minion
|
ExecStart=/usr/bin/salt-minion
|
||||||
TimeoutStartSec=120
|
TimeoutStartSec=120
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,21 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features') and salt['grains.get']('salt-cloud', {}) %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- sensor.vm.network
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
offload_script:
|
offload_script:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
|
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
|
||||||
|
|||||||
71
salt/sensor/vm/network.sls
Normal file
71
salt/sensor/vm/network.sls
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
{% set mainint = salt['pillar.get']('host:mainint', 'enp1s0') %}
|
||||||
|
{% set interfaces = salt['network.interfaces']() %}
|
||||||
|
{% set non_enp1s0_interfaces = [] %}
|
||||||
|
{% for iface, data in interfaces.items() %}
|
||||||
|
{% if iface != mainint and not iface.startswith(('veth', 'docker', 'lo', 'br', 'sobridge', 'bond')) %}
|
||||||
|
{% do non_enp1s0_interfaces.append(iface) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
# Create bond0 interface with NetworkManager
|
||||||
|
bond0_interface:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
nmcli con add type bond \
|
||||||
|
con-name bond0 \
|
||||||
|
ifname bond0 \
|
||||||
|
mode 0 \
|
||||||
|
miimon 100 \
|
||||||
|
ipv4.method disabled \
|
||||||
|
ipv6.method ignore \
|
||||||
|
ipv6.addr-gen-mode default \
|
||||||
|
connection.autoconnect yes
|
||||||
|
nmcli con mod bond0 ethernet.mtu 9000
|
||||||
|
nmcli con up bond0
|
||||||
|
- unless: nmcli con show bond0
|
||||||
|
{% if non_enp1s0_interfaces|length > 0 %}
|
||||||
|
- require_in:
|
||||||
|
{% for iface in non_enp1s0_interfaces %}
|
||||||
|
- cmd: {{ iface }}_slave
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Configure non-enp1s0 interfaces as bond slaves first
|
||||||
|
{% if non_enp1s0_interfaces|length > 0 %}
|
||||||
|
{% for iface in non_enp1s0_interfaces %}
|
||||||
|
{{ iface }}_slave:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
nmcli con add type ethernet \
|
||||||
|
con-name bond0-slave-{{ iface }} \
|
||||||
|
ifname {{ iface }} \
|
||||||
|
master bond0 \
|
||||||
|
slave-type bond \
|
||||||
|
ethernet.mtu 9000
|
||||||
|
nmcli con up bond0-slave-{{ iface }}
|
||||||
|
- unless: nmcli con show bond0-slave-{{ iface }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
{% endif %}
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
post_setup_cron:
|
|
||||||
cron.present:
|
|
||||||
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
|
|
||||||
- identifier: post_setup_cron
|
|
||||||
- user: root
|
|
||||||
- minute: '*/5'
|
|
||||||
- identifier: post_setup_cron
|
|
||||||
9
salt/setup/virt/init.sls
Normal file
9
salt/setup/virt/init.sls
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{% set role = grains.id.split("_") | last %}
|
||||||
|
include:
|
||||||
|
- setup.virt.setHostname
|
||||||
|
- setup.virt.sominion
|
||||||
|
- common.packages # python3-dnf-plugin-versionlock
|
||||||
|
{% if role in ['sensor', 'heavynode'] %}
|
||||||
|
- sensor.vm.network
|
||||||
|
{% endif %}
|
||||||
|
- setup.virt.setSalt
|
||||||
13
salt/setup/virt/setHostname.sls
Normal file
13
salt/setup/virt/setHostname.sls
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
setHostname_{{grains.id.split("_") | first}}:
|
||||||
|
cmd.run:
|
||||||
|
- name: hostnamectl set-hostname --static {{grains.id.split("_") | first}}
|
||||||
|
network.system:
|
||||||
|
- name: {{grains.id.split("_") | first}}
|
||||||
|
- enabled: True
|
||||||
|
- hostname: {{grains.id.split("_") | first}}
|
||||||
|
- apply_hostname: True
|
||||||
18
salt/setup/virt/setSalt.sls
Normal file
18
salt/setup/virt/setSalt.sls
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
set_role_grain:
|
||||||
|
grains.present:
|
||||||
|
- name: role
|
||||||
|
- value: so-{{ grains.id.split("_") | last }}
|
||||||
|
|
||||||
|
set_highstate:
|
||||||
|
file.append:
|
||||||
|
- name: /etc/salt/minion
|
||||||
|
- text: 'startup_states: highstate'
|
||||||
|
|
||||||
|
enable_salt_minion:
|
||||||
|
service.enabled:
|
||||||
|
- name: salt-minion
|
||||||
89
salt/setup/virt/soinstall.map.jinja
Normal file
89
salt/setup/virt/soinstall.map.jinja
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% set nodetype = grains.id.split("_") | last %}
|
||||||
|
{% set hypervisor = salt['grains.get']('salt-cloud:profile').split('-')[1] %}
|
||||||
|
|
||||||
|
{# Import hardware details from VM hardware tracking file #}
|
||||||
|
{% import_json 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ grains.id as vm_hardware %}
|
||||||
|
|
||||||
|
{% set DATA = {} %}
|
||||||
|
{% do DATA.update({'MNIC': 'enp1s0'}) %}
|
||||||
|
{% do DATA.update({'MAINIP': grains.ip_interfaces.get(DATA.MNIC)[0]}) %}
|
||||||
|
|
||||||
|
{# Use CPU value from VM hardware file if available, otherwise fallback to grains #}
|
||||||
|
{% if vm_hardware and vm_hardware.get('config', {}).get('cpu') %}
|
||||||
|
{% do DATA.update({'CPUCORES': vm_hardware.get('config', {}).get('cpu')|int }) %}
|
||||||
|
{% do salt.log.info('Using CPU from VM hardware file: ' ~ vm_hardware.get('config', {}).get('cpu')|string) %}
|
||||||
|
{% else %}
|
||||||
|
{% do DATA.update({'CPUCORES': grains.num_cpus }) %}
|
||||||
|
{% do salt.log.error('Using CPU from grains: ' ~ grains.num_cpus|string) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# Use memory value from VM hardware file if available, otherwise fallback to grains. If grains is used, it will be from cpu/mem from the base domain. #}
|
||||||
|
{% if vm_hardware and vm_hardware.get('config', {}).get('memory') %}
|
||||||
|
{% set total_mem = vm_hardware.get('config', {}).get('memory')|int * 1024 %}
|
||||||
|
{% do salt.log.info('Using memory from VM hardware file: ' ~ vm_hardware.get('config', {}).get('memory')|string ~ ' (converted to ' ~ total_mem|string ~ ')') %}
|
||||||
|
{% else %}
|
||||||
|
{% set total_mem = grains.mem_total %}
|
||||||
|
{% do salt.log.error('Using memory from grains: ' ~ total_mem|string) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% do DATA.update({'NODE_DESCRIPTION': 'VM of ' ~ hypervisor}) %}
|
||||||
|
{% do DATA.update({'NODETYPE': nodetype | upper}) %}
|
||||||
|
|
||||||
|
{% if nodetype in ['standalone', 'sensor', 'heavynode']%}
|
||||||
|
{% do DATA.update({'INTERFACE': 'bond0'}) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# Calculate reasonable core usage #}
|
||||||
|
{% set cores_for_zeek = (DATA.CPUCORES / 2) - 1 %}
|
||||||
|
{% set lb_procs_round = cores_for_zeek|round|int %}
|
||||||
|
{% set lb_procs = 1 if lb_procs_round < 1 else lb_procs_round %}
|
||||||
|
{% do salt.log.info('Cores for load balancing: ' ~ lb_procs|string) %}
|
||||||
|
{# Check memory conditions #}
|
||||||
|
{% set low_mem = false %}
|
||||||
|
{% do salt.log.info('Memory check using total_mem: ' ~ total_mem|string) %}
|
||||||
|
{% if nodetype in ['standalone', 'heavynode'] %}
|
||||||
|
{% if total_mem > 15000 and total_mem < 24000 %}
|
||||||
|
{% set low_mem = true %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{# Set CORECOUNT based on memory conditions #}
|
||||||
|
{% if low_mem %}
|
||||||
|
{% do DATA.update({'CORECOUNT': 1}) %}
|
||||||
|
{% else %}
|
||||||
|
{% do DATA.update({'CORECOUNT': lb_procs}) %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
{% if nodetype in ['searchnode', 'receiver', 'fleet', 'heavynode'] %}
|
||||||
|
{# we can't use the host grain here because the grain may not be updated yet from the hostname change #}
|
||||||
|
{% do DATA.update({'LSHOSTNAME': grains.id.split("_") | first}) %}
|
||||||
|
{% if total_mem >= 32000 or nodetype in ['managersearch','heavynode','standalone'] %}
|
||||||
|
{% set LSHEAP="1000m" %}
|
||||||
|
{% elif nodetype == 'eval' %}
|
||||||
|
{% set LSHEAP="700m" %}
|
||||||
|
{% else %}
|
||||||
|
{% set LSHEAP="500m" %}
|
||||||
|
{% endif %}
|
||||||
|
{% do DATA.update({'LSHEAP': LSHEAP}) %}
|
||||||
|
{% endif %}
|
||||||
|
{% if nodetype in ['searchnode', 'heavynode'] %}
|
||||||
|
{# this replicates the function es_heapsize in so-functions #}
|
||||||
|
{% if total_mem < 8000 %}
|
||||||
|
{% set ES_HEAP_SIZE = "600m" %}
|
||||||
|
{% elif total_mem >= 100000 %}
|
||||||
|
{% set ES_HEAP_SIZE = "25000m" %}
|
||||||
|
{% else %}
|
||||||
|
{% set ES_HEAP_SIZE = (total_mem / 3) | int %}
|
||||||
|
{% if ES_HEAP_SIZE > 25000 %}
|
||||||
|
{% set ES_HEAP_SIZE = "25000m" %}
|
||||||
|
{% else %}
|
||||||
|
{% set ES_HEAP_SIZE = ES_HEAP_SIZE ~ "m" %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{% do DATA.update({'ES_HEAP_SIZE': ES_HEAP_SIZE}) %}
|
||||||
|
{% endif %}
|
||||||
32
salt/setup/virt/sominion.sls
Normal file
32
salt/setup/virt/sominion.sls
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'setup/virt/soinstall.map.jinja' import DATA %}
|
||||||
|
|
||||||
|
create_pillar:
|
||||||
|
event.send:
|
||||||
|
- name: setup/so-minion
|
||||||
|
- data:
|
||||||
|
HYPERVISOR_HOST: {{ grains.hypervisor_host }}
|
||||||
|
MAINIP: {{ DATA.MAINIP }}
|
||||||
|
MNIC: {{ DATA.MNIC }}
|
||||||
|
NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
|
||||||
|
NODETYPE: {{ DATA.NODETYPE }}
|
||||||
|
CPUCORES: {{ DATA.CPUCORES }}
|
||||||
|
{% if 'CORECOUNT' in DATA %}
|
||||||
|
CORECOUNT: {{ DATA.CORECOUNT }}
|
||||||
|
{% endif %}
|
||||||
|
{% if 'INTERFACE' in DATA %}
|
||||||
|
INTERFACE: {{ DATA.INTERFACE }}
|
||||||
|
{% endif %}
|
||||||
|
{% if 'ES_HEAP_SIZE' in DATA %}
|
||||||
|
ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
|
||||||
|
{% endif %}
|
||||||
|
{% if 'LSHOSTNAME' in DATA %}
|
||||||
|
LSHOSTNAME: {{ DATA.LSHOSTNAME }}
|
||||||
|
{% endif %}
|
||||||
|
{% if 'LSHEAP' in DATA %}
|
||||||
|
LS_HEAP_SIZE: {{ DATA.LSHEAP }}
|
||||||
|
{% endif %}
|
||||||
88
salt/soc/dyanno/hypervisor/hypervisor.yaml
Normal file
88
salt/soc/dyanno/hypervisor/hypervisor.yaml
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
hypervisor:
|
||||||
|
hosts:
|
||||||
|
defaultHost:
|
||||||
|
title: defaultHost
|
||||||
|
description: "Hypervisor Configuration"
|
||||||
|
syntax: json
|
||||||
|
file: true
|
||||||
|
global: true
|
||||||
|
uiElementsDeleteMessage: "Warning: Following the actions below will permanently destroy the virtual machine."
|
||||||
|
uiElements:
|
||||||
|
- field: hostname
|
||||||
|
label: "Hostname"
|
||||||
|
forcedType: string
|
||||||
|
required: true
|
||||||
|
readonly: true
|
||||||
|
- field: role
|
||||||
|
label: "Role"
|
||||||
|
required: true
|
||||||
|
readonly: true
|
||||||
|
options:
|
||||||
|
- searchnode
|
||||||
|
- sensor
|
||||||
|
- receiver
|
||||||
|
- idh
|
||||||
|
- heavynode
|
||||||
|
- fleet
|
||||||
|
- field: network_mode
|
||||||
|
label: "Choose static4 or dhcp4. If static4, populate IP details below."
|
||||||
|
required: true
|
||||||
|
readonly: true
|
||||||
|
options:
|
||||||
|
- static4
|
||||||
|
- dhcp4
|
||||||
|
- field: ip4
|
||||||
|
label: "IP address with netmask: 192.168.1.10/24 - If using dhcp, enter a character in this field and delete it. This will eliminate the incomplete/invalid setting entry error as well as the improperly formatted address error."
|
||||||
|
forcedType: string
|
||||||
|
regex: "^$|^(\\d{1,3}\\.){3}\\d{1,3}/\\d{1,2}$"
|
||||||
|
regexFailureMessage: "Enter a properly formatted CIDR address"
|
||||||
|
readonly: true
|
||||||
|
- field: gw4
|
||||||
|
label: "Gateway - If using dhcp, enter a character in this field and delete it. This will eliminate the incomplete/invalid setting entry error as well as the improperly formatted address error."
|
||||||
|
forcedType: string
|
||||||
|
regex: "^$|^(\\d{1,3}\\.){3}\\d{1,3}$"
|
||||||
|
regexFailureMessage: "Enter a properly formatted IP address"
|
||||||
|
readonly: true
|
||||||
|
- field: dns4
|
||||||
|
label: "Single DNS IP or comma separated list: 192.168.1.1,8.8.8.8 - If using dhcp, enter a character in this field and delete it. This will eliminate the incomplete/invalid setting entry error as well as the improperly formatted address error."
|
||||||
|
forcedType: string
|
||||||
|
regex: "^$|^(\\d{1,3}\\.){3}\\d{1,3}(,\\s*(\\d{1,3}\\.){3}\\d{1,3})*$"
|
||||||
|
regexFailureMessage: "Enter a properly formatted IP address or list of addresses"
|
||||||
|
readonly: true
|
||||||
|
- field: search4
|
||||||
|
label: "Search domain"
|
||||||
|
forcedType: string
|
||||||
|
readonly: true
|
||||||
|
- field: cpu
|
||||||
|
label: "CPU cores to assign. Free: FREE | Total: TOTAL"
|
||||||
|
forcedType: int
|
||||||
|
required: true
|
||||||
|
readonly: true
|
||||||
|
- field: memory
|
||||||
|
label: "Memory to assign, in GB. Free: FREE | Total: TOTAL"
|
||||||
|
required: true
|
||||||
|
readonly: true
|
||||||
|
forcedType: int
|
||||||
|
- field: disk
|
||||||
|
label: "Disk(s) for passthrough. Free: FREE | Total: TOTAL"
|
||||||
|
readonly: true
|
||||||
|
options: []
|
||||||
|
forcedType: '[]int'
|
||||||
|
- field: copper
|
||||||
|
label: "Copper port(s) for passthrough. Free: FREE | Total: TOTAL"
|
||||||
|
readonly: true
|
||||||
|
options: []
|
||||||
|
forcedType: '[]int'
|
||||||
|
- field: sfp
|
||||||
|
label: "SFP port(s) for passthrough. Free: FREE | Total: TOTAL"
|
||||||
|
readonly: true
|
||||||
|
options: []
|
||||||
|
forcedType: '[]int'
|
||||||
|
- field: powercontrol
|
||||||
|
label: "Execute VM power operations"
|
||||||
|
options:
|
||||||
|
- Start
|
||||||
|
- Reboot
|
||||||
|
- Shutdown
|
||||||
|
- Reset
|
||||||
|
- Stop
|
||||||
51
salt/soc/dyanno/hypervisor/init.sls
Normal file
51
salt/soc/dyanno/hypervisor/init.sls
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
{% from 'hypervisor/map.jinja' import HYPERVISORS %}
|
||||||
|
|
||||||
|
hypervisor_annotation:
|
||||||
|
file.managed:
|
||||||
|
- name: /opt/so/saltstack/default/salt/hypervisor/soc_hypervisor.yaml
|
||||||
|
- source: salt://soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
|
||||||
|
- template: jinja
|
||||||
|
- user: socore
|
||||||
|
- group: socore
|
||||||
|
- defaults:
|
||||||
|
HYPERVISORS: {{ HYPERVISORS }}
|
||||||
|
baseDomainStatus: {{ salt['pillar.get']('baseDomain:status', 'Initialized') }}
|
||||||
|
|
||||||
|
{% for role in HYPERVISORS %}
|
||||||
|
{% for hypervisor in HYPERVISORS[role].keys() %}
|
||||||
|
hypervisor_host_directory_{{hypervisor}}:
|
||||||
|
file.directory:
|
||||||
|
- name: /opt/so/saltstack/local/salt/hypervisor/hosts/{{hypervisor}}
|
||||||
|
- makedirs: True
|
||||||
|
- user: socore
|
||||||
|
- group: socore
|
||||||
|
- recurse:
|
||||||
|
- user
|
||||||
|
- group
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_no_license_detected:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_no_license_detected
|
||||||
|
- comment:
|
||||||
|
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
14
salt/soc/dyanno/hypervisor/map.jinja
Normal file
14
salt/soc/dyanno/hypervisor/map.jinja
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {}) %}
|
||||||
|
|
||||||
|
{# Define the list of process steps in order (case-sensitive) #}
|
||||||
|
{% set PROCESS_STEPS = [
|
||||||
|
'Processing',
|
||||||
|
'IP Configuration',
|
||||||
|
'Starting Create',
|
||||||
|
'Executing Deploy Script',
|
||||||
|
'Initialize Minion Pillars',
|
||||||
|
'Created Instance',
|
||||||
|
'Hardware Configuration',
|
||||||
|
'Highstate Initiated',
|
||||||
|
'Destroyed Instance'
|
||||||
|
] %}
|
||||||
228
salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
Normal file
228
salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0.
|
||||||
|
|
||||||
|
Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
|
||||||
|
"You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
in the software, and you may not remove or obscure any functionality in the
|
||||||
|
software that is protected by the license key." #}
|
||||||
|
|
||||||
|
{%- if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||||
|
|
||||||
|
{%- import_yaml 'soc/dyanno/hypervisor/hypervisor.yaml' as ANNOTATION -%}
|
||||||
|
{%- from 'hypervisor/map.jinja' import HYPERVISORS -%}
|
||||||
|
{%- from 'soc/dyanno/hypervisor/map.jinja' import PROCESS_STEPS -%}
|
||||||
|
|
||||||
|
{%- set TEMPLATE = ANNOTATION.hypervisor.hosts.pop('defaultHost') -%}
|
||||||
|
|
||||||
|
{%- macro update_description(description, cpu_free, mem_free, disk_free, copper_free, sfp_free, vm_list, cpu_total, mem_total, disk_total, copper_total, sfp_total) -%}
|
||||||
|
#### Resource Summary
|
||||||
|
| | CPU Cores | Memory (GB) | Disk | Copper | SFP |
|
||||||
|
|-----------|-----------|-------------|-------------|-------------|-------------|
|
||||||
|
| Available | {{ cpu_free }} | {{ mem_free }} | {{ disk_free | replace('\n', ',') if disk_free else 'None' }} | {{ copper_free | replace('\n', ',') if copper_free else 'None' }} | {{ sfp_free | replace('\n', ',') if sfp_free else 'None' }} |
|
||||||
|
| Total | {{ cpu_total }} | {{ mem_total }} | {{ disk_total | replace('\n', ',') }} | {{ copper_total | replace('\n', ',') }} | {{ sfp_total | replace('\n', ',') }} |
|
||||||
|
|
||||||
|
{%- if baseDomainStatus == 'Initialized' %}
|
||||||
|
{%- if vm_list %}
|
||||||
|
#### Virtual Machines
|
||||||
|
Status values: {% for step in PROCESS_STEPS %}{{ step }}{% if not loop.last %}, {% endif %}{% endfor %}. "Last Updated" shows when status changed. After "Highstate Initiated", only "Destroyed Instance" updates the timestamp.
|
||||||
|
|
||||||
|
| Name | Status | CPU Cores | Memory (GB)| Disk | Copper | SFP | Last Updated |
|
||||||
|
|--------------------|--------------------|-----------|------------|------|--------|------|---------------------|
|
||||||
|
{%- for hostname, vm_data in vm_list.items() %}
|
||||||
|
{%- set vm_status = vm_data.get('status', {}).get('status', 'Unknown') %}
|
||||||
|
{%- set is_destroyed = vm_status == 'Destroyed Instance' %}
|
||||||
|
{%- if is_destroyed %}
|
||||||
|
| {{ hostname }} | {{ vm_status }} | - | - | - | - | - | {{ vm_data.get('status', {}).get('timestamp', 'Never') | replace('T', ' ') | regex_replace('\\.[0-9]+', '') }} |
|
||||||
|
{%- else %}
|
||||||
|
| {{ hostname }} | {{ vm_status }} | {{ vm_data.get('config', {}).get('cpu', 'N/A') }} | {{ vm_data.get('config', {}).get('memory', 'N/A') }} | {{ vm_data.get('config', {}).get('disk', []) | join(',') if vm_data.get('config', {}).get('disk') else '-' }} | {{ vm_data.get('config', {}).get('copper', []) | join(',') if vm_data.get('config', {}).get('copper') else '-' }} | {{ vm_data.get('config', {}).get('sfp', []) | join(',') if vm_data.get('config', {}).get('sfp') else '-' }} | {{ vm_data.get('status', {}).get('timestamp', 'Never') | replace('T', ' ') | regex_replace('\\.[0-9]+', '') }} |
|
||||||
|
{%- endif %}
|
||||||
|
{%- endfor %}
|
||||||
|
{%- else %}
|
||||||
|
#### Virtual Machines
|
||||||
|
Status values: {% for step in PROCESS_STEPS %}{{ step }}{% if not loop.last %}, {% endif %}{% endfor %}. "Last Updated" shows when status changed. After "Highstate Initiated", only "Destroyed Instance" updates the timestamp.
|
||||||
|
|
||||||
|
No Virtual Machines Found
|
||||||
|
{%- endif %}
|
||||||
|
{%- else %}
|
||||||
|
#### WARNING
|
||||||
|
|
||||||
|
Base domain has not been initialized.
|
||||||
|
{%- endif %}
|
||||||
|
{%- endmacro -%}
|
||||||
|
|
||||||
|
{%- macro update_label(label, total, free) -%}
|
||||||
|
{{- label | replace('TOTAL', total | string)
|
||||||
|
| replace('FREE', free | string) -}}
|
||||||
|
{%- endmacro -%}
|
||||||
|
|
||||||
|
{%- macro get_available_pci(hw_config, device_type, used_indices) -%}
|
||||||
|
{%- set available = [] -%}
|
||||||
|
{%- for idx in hw_config.get(device_type, {}).keys() -%}
|
||||||
|
{%- if idx | string not in used_indices -%}
|
||||||
|
{%- do available.append(idx) -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
{{- available | join(',') -}}
|
||||||
|
{%- endmacro -%}
|
||||||
|
|
||||||
|
{%- macro update_resource_field(field, free_value, total_value, unit_label) -%}
|
||||||
|
{%- set resource_regex = '' -%}
|
||||||
|
{%- if free_value < 10 -%}
|
||||||
|
{%- set resource_regex = '^[1-' ~ free_value ~ ']$' -%}
|
||||||
|
{%- elif free_value < 100 -%}
|
||||||
|
{%- set tens_digit = free_value // 10 -%}
|
||||||
|
{%- set ones_digit = free_value % 10 -%}
|
||||||
|
{%- if ones_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-' ~ (tens_digit-1) ~ '][0-9]|' ~ tens_digit ~ '0)$' -%}
|
||||||
|
{%- else -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-' ~ (tens_digit-1) ~ '][0-9]|' ~ tens_digit ~ '[0-' ~ ones_digit ~ '])$' -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- elif free_value < 1000 -%}
|
||||||
|
{%- set hundreds_digit = free_value // 100 -%}
|
||||||
|
{%- set tens_digit = (free_value % 100) // 10 -%}
|
||||||
|
{%- set ones_digit = free_value % 10 -%}
|
||||||
|
{%- if hundreds_digit == 1 -%}
|
||||||
|
{%- if tens_digit == 0 and ones_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|100)$' -%}
|
||||||
|
{%- elif tens_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|10[0-' ~ ones_digit ~ '])$' -%}
|
||||||
|
{%- elif ones_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|10[0-9]|1[1-' ~ tens_digit ~ ']0)$' -%}
|
||||||
|
{%- else -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|10[0-9]|1[1-' ~ (tens_digit-1) ~ '][0-9]|1' ~ tens_digit ~ '[0-' ~ ones_digit ~ '])$' -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- else -%}
|
||||||
|
{%- if tens_digit == 0 and ones_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-' ~ (hundreds_digit-1) ~ '][0-9][0-9]|' ~ hundreds_digit ~ '00)$' -%}
|
||||||
|
{%- elif ones_digit == 0 -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-' ~ (hundreds_digit-1) ~ '][0-9][0-9]|' ~ hundreds_digit ~ '[0-' ~ tens_digit ~ ']0)$' -%}
|
||||||
|
{%- else -%}
|
||||||
|
{%- set resource_regex = '^([1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-' ~ (hundreds_digit-1) ~ '][0-9][0-9]|' ~ hundreds_digit ~ '[0-' ~ (tens_digit-1) ~ '][0-9]|' ~ hundreds_digit ~ tens_digit ~ '[0-' ~ ones_digit ~ '])$' -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- do field.update({
|
||||||
|
'label': field.label | replace('FREE', free_value | string) | replace('TOTAL', total_value | string),
|
||||||
|
'regex': resource_regex,
|
||||||
|
'regexFailureMessage': 'Enter a value not exceeding ' ~ free_value | string ~ ' ' ~ unit_label
|
||||||
|
}) -%}
|
||||||
|
{%- endmacro -%}
|
||||||
|
|
||||||
|
{%- for role in HYPERVISORS -%}
|
||||||
|
{%- for hypervisor in HYPERVISORS[role].keys() -%}
|
||||||
|
{%- set hw_config = HYPERVISORS[role][hypervisor].hardware -%}
|
||||||
|
{%- set vms = HYPERVISORS[role][hypervisor].vms -%}
|
||||||
|
|
||||||
|
{# Calculate used CPU and memory #}
|
||||||
|
{%- set used_cpu = 0 -%}
|
||||||
|
{%- set used_memory = 0 -%}
|
||||||
|
{%- set ns = namespace(used_cpu=0, used_memory=0) -%}
|
||||||
|
{%- for hostname, vm_data in vms.items() -%}
|
||||||
|
{%- set vm_status = vm_data.get('status', {}).get('status', '') -%}
|
||||||
|
{%- if vm_status != 'Destroyed Instance' -%}
|
||||||
|
{%- set vm_config = vm_data.config -%}
|
||||||
|
{%- set ns.used_cpu = ns.used_cpu + vm_config.get('cpu', 0) | int -%}
|
||||||
|
{%- set ns.used_memory = ns.used_memory + vm_config.get('memory', 0) | int -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
|
||||||
|
{# Calculate available resources #}
|
||||||
|
{%- set cpu_free = hw_config.cpu - ns.used_cpu -%}
|
||||||
|
{%- set mem_free = hw_config.memory - ns.used_memory -%}
|
||||||
|
|
||||||
|
{# Get used PCI indices #}
|
||||||
|
{%- set used_disk = [] -%}
|
||||||
|
{%- set used_copper = [] -%}
|
||||||
|
{%- set used_sfp = [] -%}
|
||||||
|
{%- for hostname, vm in vms.items() -%}
|
||||||
|
{%- set vm_status = vm.get('status', {}).get('status', '') -%}
|
||||||
|
{%- if vm_status != 'Destroyed Instance' -%}
|
||||||
|
{%- set config = vm.get('config', {}) -%}
|
||||||
|
{%- do used_disk.extend(config.get('disk', [])) -%}
|
||||||
|
{%- do used_copper.extend(config.get('copper', [])) -%}
|
||||||
|
{%- do used_sfp.extend(config.get('sfp', [])) -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
|
||||||
|
{# Get available PCI indices #}
|
||||||
|
{%- set disk_free = get_available_pci(hw_config, 'disk', used_disk) -%}
|
||||||
|
{%- set copper_free = get_available_pci(hw_config, 'copper', used_copper) -%}
|
||||||
|
{%- set sfp_free = get_available_pci(hw_config, 'sfp', used_sfp) -%}
|
||||||
|
|
||||||
|
{# Get total resources #}
|
||||||
|
{%- set cpu_total = hw_config.cpu -%}
|
||||||
|
{%- set mem_total = hw_config.memory -%}
|
||||||
|
{%- set disk_total = hw_config.disk.keys() | join('\n') -%}
|
||||||
|
{%- set copper_total = hw_config.copper.keys() | join('\n') -%}
|
||||||
|
{%- set sfp_total = hw_config.sfp.keys() | join('\n') -%}
|
||||||
|
|
||||||
|
{# Update field labels with total and free values #}
|
||||||
|
{%- set updated_template = TEMPLATE.copy() -%}
|
||||||
|
{%- set updated_elements = [] -%}
|
||||||
|
{%- for field in updated_template.uiElements -%}
|
||||||
|
{%- set updated_field = field.copy() -%}
|
||||||
|
{%- if field.field == 'cpu' -%}
|
||||||
|
{%- do update_resource_field(updated_field, cpu_free, cpu_total, 'cores') -%}
|
||||||
|
{%- elif field.field == 'memory' -%}
|
||||||
|
{%- do update_resource_field(updated_field, mem_free, mem_total, 'GB') -%}
|
||||||
|
{%- elif field.field == 'disk' -%}
|
||||||
|
{%- set disk_free_list = disk_free.split(',') if disk_free else [] -%}
|
||||||
|
{%- do updated_field.update({
|
||||||
|
'label': field.label | replace('FREE', disk_free) | replace('TOTAL', disk_total | replace('\n', ',')),
|
||||||
|
'options': disk_free_list
|
||||||
|
}) -%}
|
||||||
|
{%- elif field.field == 'copper' -%}
|
||||||
|
{%- set copper_free_list = copper_free.split(',') if copper_free else [] -%}
|
||||||
|
{%- do updated_field.update({
|
||||||
|
'label': field.label | replace('FREE', copper_free) | replace('TOTAL', copper_total | replace('\n', ',')),
|
||||||
|
'options': copper_free_list
|
||||||
|
}) -%}
|
||||||
|
{%- elif field.field == 'sfp' -%}
|
||||||
|
{%- set sfp_free_list = sfp_free.split(',') if sfp_free else [] -%}
|
||||||
|
{%- do updated_field.update({
|
||||||
|
'label': field.label | replace('FREE', sfp_free) | replace('TOTAL', sfp_total | replace('\n', ',')),
|
||||||
|
'options': sfp_free_list
|
||||||
|
}) -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- do updated_elements.append(updated_field) -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
{%- if baseDomainStatus == 'Initialized' %}
|
||||||
|
{%- do updated_template.update({'uiElements': updated_elements}) -%}
|
||||||
|
{%- else -%}
|
||||||
|
{%- do updated_template.pop('uiElements') -%}
|
||||||
|
{%- endif -%}
|
||||||
|
{%- do updated_template.update({
|
||||||
|
'title': hypervisor,
|
||||||
|
'description': update_description(
|
||||||
|
hypervisor,
|
||||||
|
cpu_free,
|
||||||
|
mem_free,
|
||||||
|
disk_free,
|
||||||
|
copper_free,
|
||||||
|
sfp_free,
|
||||||
|
vms,
|
||||||
|
cpu_total,
|
||||||
|
mem_total,
|
||||||
|
disk_total,
|
||||||
|
copper_total,
|
||||||
|
sfp_total
|
||||||
|
)
|
||||||
|
}) -%}
|
||||||
|
{%- do ANNOTATION.hypervisor.hosts.update({hypervisor ~ 'VMs': updated_template}) -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
{%- endfor -%}
|
||||||
|
|
||||||
|
{{- ANNOTATION | yaml(False) -}}
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
{%- do salt.log.error(
|
||||||
|
'Hypervisor nodes are a feature supported only for customers with a valid license.'
|
||||||
|
'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com'
|
||||||
|
'for more information about purchasing a license to enable this feature.'
|
||||||
|
) -%}
|
||||||
|
|
||||||
|
{%- endif -%}
|
||||||
96
salt/soc/dyanno/hypervisor/write_status.sls
Normal file
96
salt/soc/dyanno/hypervisor/write_status.sls
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||||
|
|
||||||
|
{# Import the process steps from map.jinja #}
|
||||||
|
{% from 'soc/dyanno/hypervisor/map.jinja' import PROCESS_STEPS %}
|
||||||
|
|
||||||
|
{% do salt.log.info('soc/dyanno/hypervisor/write_status: Running') %}
|
||||||
|
{% set vm_name = pillar.get('vm_name') %}
|
||||||
|
{% set hypervisor = pillar.get('hypervisor') %}
|
||||||
|
{% set status_data = pillar.get('status_data', {}) %}
|
||||||
|
{% set event_tag = pillar.get('event_tag') %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: tag: ' ~ event_tag) %}
|
||||||
|
{% set base_path = '/opt/so/saltstack/local/salt/hypervisor/hosts' %}
|
||||||
|
{% set status_dir = base_path ~ '/' ~ hypervisor %}
|
||||||
|
{% set status_file = status_dir ~ '/' ~ vm_name ~ '.status' %}
|
||||||
|
|
||||||
|
{% set new_index = PROCESS_STEPS.index(status_data.get('status')) %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: new_index: ' ~ new_index|string) %}
|
||||||
|
|
||||||
|
# Function to read and parse current JSON status file
|
||||||
|
{% macro get_current_status(status_file) %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: getting current status from file: ' ~ status_file) %}
|
||||||
|
|
||||||
|
{% set rel_path_status_file = 'hypervisor/hosts' ~ '/' ~ hypervisor ~ '/' ~ vm_name ~ '.status' %}
|
||||||
|
{# If the status file doesn't exist, then we are just now Processing, so return -1 #}
|
||||||
|
{% if salt['file.file_exists'](status_file)%}
|
||||||
|
{% import_json rel_path_status_file as current_status %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: current status: ' ~ current_status) %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: current status: ' ~ current_status.get('status')) %}
|
||||||
|
{% if current_status.get('status') in PROCESS_STEPS %}
|
||||||
|
{% set current_index = PROCESS_STEPS.index(current_status.get('status')) %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: current_index: ' ~ current_index|string) %}
|
||||||
|
{%- set return_value = current_index -%}
|
||||||
|
{% else %}
|
||||||
|
{%- set return_value = -1 -%}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
{% set return_value = -1 %}
|
||||||
|
{% endif %}
|
||||||
|
{{- return_value -}}
|
||||||
|
{% endmacro %}
|
||||||
|
|
||||||
|
{% set current_index = get_current_status(status_file)|int %}
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: ' ~ status_file ~ ' current status index: ' ~ current_index|string) %}
|
||||||
|
|
||||||
|
ensure_status_dir:
|
||||||
|
file.directory:
|
||||||
|
- name: {{ status_dir }}
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- mode: 755
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
|
||||||
|
{# Some of the status updates trigger within a second of each other can can cause, for example, IP Configuration orchestration to process before the Processing #}
|
||||||
|
{# This check has been put in place to ensure a status sooner in the process can't overwrite this file if a status later in the process wrote to it first. #}
|
||||||
|
{# The final step is Destroyed, so we allow Processing to overwrite that incase someone creates a new VM with same name that was previously destroyed. #}
|
||||||
|
{% if new_index > current_index or (current_index == PROCESS_STEPS | length - 1 and new_index == 0) %}
|
||||||
|
write_status_file:
|
||||||
|
file.serialize:
|
||||||
|
- name: {{ status_file }}
|
||||||
|
- dataset: {{ status_data|json }}
|
||||||
|
- formatter: json
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- mode: 600
|
||||||
|
- indent: 2
|
||||||
|
- require:
|
||||||
|
- file: ensure_status_dir
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{% do salt.log.debug('soc/dyanno/hypervisor/write_status: File not written. ' ~ PROCESS_STEPS[new_index] ~ ' cannot overwrite ' ~ PROCESS_STEPS[current_index] ~ '.' ) %}
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% do salt.log.info('soc/dyanno/hypervisor/write_status: Completed') %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{% do salt.log.error(
|
||||||
|
'Hypervisor nodes are a feature supported only for customers with a valid license.'
|
||||||
|
'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com'
|
||||||
|
'for more information about purchasing a license to enable this feature.'
|
||||||
|
) %}
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
@@ -17,7 +17,7 @@
|
|||||||
{% set COMMONNAME = GLOBALS.manager %}
|
{% set COMMONNAME = GLOBALS.manager %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %}
|
{% if GLOBALS.is_manager %}
|
||||||
include:
|
include:
|
||||||
- ca
|
- ca
|
||||||
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
|
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
|
||||||
@@ -99,7 +99,7 @@ influxkeyperms:
|
|||||||
- mode: 640
|
- mode: 640
|
||||||
- group: 939
|
- group: 939
|
||||||
|
|
||||||
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
|
||||||
# Create a cert for Redis encryption
|
# Create a cert for Redis encryption
|
||||||
redis_key:
|
redis_key:
|
||||||
x509.private_key_managed:
|
x509.private_key_managed:
|
||||||
@@ -139,7 +139,7 @@ rediskeyperms:
|
|||||||
- group: 939
|
- group: 939
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
|
||||||
|
|
||||||
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
|
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
|
||||||
# Start -- Elastic Fleet Host Cert
|
# Start -- Elastic Fleet Host Cert
|
||||||
@@ -388,7 +388,7 @@ chownelasticfleetagentkey:
|
|||||||
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
|
||||||
etc_filebeat_key:
|
etc_filebeat_key:
|
||||||
x509.private_key_managed:
|
x509.private_key_managed:
|
||||||
- name: /etc/pki/filebeat.key
|
- name: /etc/pki/filebeat.key
|
||||||
@@ -552,7 +552,7 @@ elasticp12perms:
|
|||||||
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if grains['role'] in ['so-sensor', 'so-manager', 'so-searchnode', 'so-eval', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-idh', 'so-import', 'so-receiver'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}
|
||||||
|
|
||||||
fbcertdir:
|
fbcertdir:
|
||||||
file.directory:
|
file.directory:
|
||||||
@@ -663,7 +663,7 @@ elastickeyperms:
|
|||||||
- group: 930
|
- group: 930
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
|
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
|
||||||
elasticfleet_kafka_key:
|
elasticfleet_kafka_key:
|
||||||
x509.private_key_managed:
|
x509.private_key_managed:
|
||||||
- name: /etc/pki/elasticfleet-kafka.key
|
- name: /etc/pki/elasticfleet-kafka.key
|
||||||
|
|||||||
256
salt/storage/files/so-nsm-cleanup
Normal file
256
salt/storage/files/so-nsm-cleanup
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
# Usage:
|
||||||
|
# so-nsm-cleanup
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# None - script automatically detects and cleans NVMe devices
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# 1. Clean NVMe devices and LVM configuration:
|
||||||
|
# ```bash
|
||||||
|
# sudo so-nsm-cleanup
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# Notes:
|
||||||
|
# - Requires root privileges
|
||||||
|
# - CAUTION: This script will destroy all data on NVMe devices
|
||||||
|
# - Removes:
|
||||||
|
# * /nsm mount point
|
||||||
|
# * LVM configuration
|
||||||
|
# * Partitions and signatures
|
||||||
|
# - Safe to run multiple times
|
||||||
|
#
|
||||||
|
# Description:
|
||||||
|
# This script cleans up NVMe devices and LVM configuration to prepare
|
||||||
|
# for testing so-nsm-mount. It performs these steps:
|
||||||
|
#
|
||||||
|
# 1. Safety Checks:
|
||||||
|
# - Verifies root privileges
|
||||||
|
# - Detects NVMe devices
|
||||||
|
# - Warns about data loss
|
||||||
|
#
|
||||||
|
# 2. Cleanup Operations:
|
||||||
|
# - Unmounts and removes /nsm
|
||||||
|
# - Removes LVM configuration
|
||||||
|
# - Cleans partitions and signatures
|
||||||
|
# - Zeros out partition tables
|
||||||
|
#
|
||||||
|
# Exit Codes:
|
||||||
|
# 0: Success - cleanup completed
|
||||||
|
# 1: Error conditions:
|
||||||
|
# - Must be run as root
|
||||||
|
# - No NVMe devices found
|
||||||
|
# - Cleanup operation failed
|
||||||
|
#
|
||||||
|
# Logging:
|
||||||
|
# - All operations logged to both console and /opt/so/log/so-nsm-cleanup.log
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
LOG_FILE="/opt/so/log/so-nsm-cleanup.log"
|
||||||
|
MOUNT_POINT="/nsm"
|
||||||
|
VG_NAME="system"
|
||||||
|
LV_NAME="nsm"
|
||||||
|
|
||||||
|
# Function to log messages
|
||||||
|
log() {
|
||||||
|
local msg="$(date '+%Y-%m-%d %H:%M:%S') $1"
|
||||||
|
echo "$msg" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to log command output
|
||||||
|
log_cmd() {
|
||||||
|
local cmd="$1"
|
||||||
|
local output
|
||||||
|
output=$($cmd 2>&1)
|
||||||
|
if [ -n "$output" ]; then
|
||||||
|
log "$2:"
|
||||||
|
echo "$output" | while IFS= read -r line; do
|
||||||
|
log " $line"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check if running as root
|
||||||
|
check_root() {
|
||||||
|
if [ "$EUID" -ne 0 ]; then
|
||||||
|
log "Error: Failed to execute - script must be run as root"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to detect NVMe devices
|
||||||
|
detect_nvme_devices() {
|
||||||
|
local -a devices=()
|
||||||
|
|
||||||
|
{
|
||||||
|
log "----------------------------------------"
|
||||||
|
log "Starting NVMe device detection"
|
||||||
|
log "----------------------------------------"
|
||||||
|
|
||||||
|
# Get list of NVMe devices
|
||||||
|
while read -r dev; do
|
||||||
|
if [[ -b "$dev" ]]; then
|
||||||
|
devices+=("$dev")
|
||||||
|
fi
|
||||||
|
done < <(find /dev -name 'nvme*n1' 2>/dev/null)
|
||||||
|
|
||||||
|
if [ ${#devices[@]} -eq 0 ]; then
|
||||||
|
log "Error: No NVMe devices found"
|
||||||
|
log "----------------------------------------"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Found ${#devices[@]} NVMe device(s):"
|
||||||
|
for dev in "${devices[@]}"; do
|
||||||
|
local size=$(lsblk -dbn -o SIZE "$dev" 2>/dev/null | numfmt --to=iec)
|
||||||
|
log " - $dev ($size)"
|
||||||
|
done
|
||||||
|
log "----------------------------------------"
|
||||||
|
} >&2
|
||||||
|
|
||||||
|
# Only output device paths to stdout
|
||||||
|
printf '%s\n' "${devices[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to cleanup mount point
|
||||||
|
cleanup_mount() {
|
||||||
|
log "Cleaning up mount point $MOUNT_POINT"
|
||||||
|
|
||||||
|
if mountpoint -q "$MOUNT_POINT" 2>/dev/null; then
|
||||||
|
log " Unmounting $MOUNT_POINT"
|
||||||
|
if ! umount "$MOUNT_POINT" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to unmount $MOUNT_POINT"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
log " Successfully unmounted"
|
||||||
|
else
|
||||||
|
log " Not mounted - skipping unmount"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -d "$MOUNT_POINT" ]]; then
|
||||||
|
log " Removing directory"
|
||||||
|
rm -rf "$MOUNT_POINT"
|
||||||
|
log " Directory removed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to cleanup LVM
|
||||||
|
cleanup_lvm() {
|
||||||
|
log "Cleaning up LVM configuration"
|
||||||
|
|
||||||
|
# Remove logical volume if it exists
|
||||||
|
if lvs "$VG_NAME/$LV_NAME" &>/dev/null; then
|
||||||
|
log " Removing logical volume $VG_NAME/$LV_NAME"
|
||||||
|
if ! lvremove -f "$VG_NAME/$LV_NAME" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to remove logical volume"
|
||||||
|
else
|
||||||
|
log " Logical volume removed"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Remove volume group if it exists
|
||||||
|
if vgs "$VG_NAME" &>/dev/null; then
|
||||||
|
log " Removing volume group $VG_NAME"
|
||||||
|
if ! vgremove -f "$VG_NAME" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to remove volume group"
|
||||||
|
else
|
||||||
|
log " Volume group removed"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to cleanup a device
|
||||||
|
cleanup_device() {
|
||||||
|
local device=$1
|
||||||
|
|
||||||
|
if [[ ! -b "$device" ]]; then
|
||||||
|
log "ERROR: Invalid device path: $device"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
local size=$(lsblk -dbn -o SIZE "$device" 2>/dev/null | numfmt --to=iec)
|
||||||
|
log "Processing device: $device ($size)"
|
||||||
|
|
||||||
|
# Remove physical volume if it exists
|
||||||
|
if pvs "$device" &>/dev/null; then
|
||||||
|
log " Removing physical volume"
|
||||||
|
if ! pvremove -ff -y "$device" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to remove physical volume"
|
||||||
|
else
|
||||||
|
log " Physical volume removed"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean all signatures and partitions
|
||||||
|
log " Cleaning signatures and partitions"
|
||||||
|
if ! wipefs -a "$device" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to clean signatures"
|
||||||
|
else
|
||||||
|
log " Signatures cleaned"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Zero out partition table
|
||||||
|
log " Zeroing partition table"
|
||||||
|
if ! dd if=/dev/zero of="$device" bs=1M count=10 status=none; then
|
||||||
|
log " WARNING: Failed to zero partition table"
|
||||||
|
else
|
||||||
|
log " Partition table zeroed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
log " Device cleanup completed"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main function
|
||||||
|
main() {
|
||||||
|
check_root
|
||||||
|
|
||||||
|
log "Starting NVMe device cleanup"
|
||||||
|
log "WARNING: This will destroy all data on NVMe devices!"
|
||||||
|
log ""
|
||||||
|
|
||||||
|
# Log initial system state
|
||||||
|
log "Initial system state:"
|
||||||
|
log_cmd "lsblk" "Block devices"
|
||||||
|
log_cmd "pvs" "Physical volumes"
|
||||||
|
log_cmd "vgs" "Volume groups"
|
||||||
|
log_cmd "lvs" "Logical volumes"
|
||||||
|
log ""
|
||||||
|
|
||||||
|
# Clean up mount point
|
||||||
|
cleanup_mount
|
||||||
|
|
||||||
|
# Clean up LVM configuration
|
||||||
|
cleanup_lvm
|
||||||
|
|
||||||
|
# Detect and clean up devices
|
||||||
|
local -a devices=()
|
||||||
|
mapfile -t devices < <(detect_nvme_devices)
|
||||||
|
|
||||||
|
log "Starting device cleanup"
|
||||||
|
for device in "${devices[@]}"; do
|
||||||
|
cleanup_device "$device"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Log final system state
|
||||||
|
log ""
|
||||||
|
log "Final system state:"
|
||||||
|
log_cmd "lsblk" "Block devices"
|
||||||
|
log_cmd "pvs" "Physical volumes"
|
||||||
|
log_cmd "vgs" "Volume groups"
|
||||||
|
log_cmd "lvs" "Logical volumes"
|
||||||
|
|
||||||
|
log ""
|
||||||
|
log "Cleanup completed successfully"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run main function
|
||||||
|
main "$@"
|
||||||
969
salt/storage/files/so-nsm-mount
Normal file
969
salt/storage/files/so-nsm-mount
Normal file
@@ -0,0 +1,969 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
# Usage:
|
||||||
|
# so-nsm-mount
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# None - script automatically detects and configures NVMe devices
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# 1. Configure and mount NVMe devices:
|
||||||
|
# ```bash
|
||||||
|
# sudo so-nsm-mount
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# Notes:
|
||||||
|
# - Requires root privileges
|
||||||
|
# - Automatically detects unmounted NVMe devices
|
||||||
|
# - Handles multiple NVMe devices:
|
||||||
|
# * Creates PV from each device
|
||||||
|
# * Combines all devices into single volume group
|
||||||
|
# * Creates single logical volume using total space
|
||||||
|
# - Safely handles existing LVM configurations:
|
||||||
|
# * Preserves proper existing configurations
|
||||||
|
# * Provides cleanup instructions if conflicts found
|
||||||
|
# - Creates or extends LVM configuration if no conflicts
|
||||||
|
# - Uses XFS filesystem
|
||||||
|
# - Configures persistent mount via /etc/fstab
|
||||||
|
# - Safe to run multiple times
|
||||||
|
#
|
||||||
|
# Description:
|
||||||
|
# This script automates the configuration and mounting of NVMe devices
|
||||||
|
# as /nsm in Security Onion virtual machines. It performs these steps:
|
||||||
|
#
|
||||||
|
# Dependencies:
|
||||||
|
# - dmidecode: Required for getting system UUID
|
||||||
|
# - nvme-cli: Required for NVMe secure erase operations
|
||||||
|
# - lvm2: Required for LVM operations
|
||||||
|
# - xfsprogs: Required for XFS filesystem operations
|
||||||
|
#
|
||||||
|
# 1. Safety Checks:
|
||||||
|
# - Verifies root privileges
|
||||||
|
# - Checks if /nsm is already mounted
|
||||||
|
# - Detects available unmounted NVMe devices
|
||||||
|
#
|
||||||
|
# 2. LVM Configuration Check:
|
||||||
|
# - If device is part of "system" VG with "nsm" LV:
|
||||||
|
# * Uses existing configuration
|
||||||
|
# * Exits successfully
|
||||||
|
# - If device is part of different LVM configuration:
|
||||||
|
# * Logs current configuration details
|
||||||
|
# * Provides specific cleanup instructions
|
||||||
|
# * Exits with error to prevent data loss
|
||||||
|
#
|
||||||
|
# 3. New Configuration (if no conflicts):
|
||||||
|
# - Creates physical volume on each NVMe device
|
||||||
|
# - Combines all devices into single "system" volume group
|
||||||
|
# - Creates single "nsm" logical volume using total space
|
||||||
|
# - Creates XFS filesystem
|
||||||
|
# - Updates /etc/fstab for persistence
|
||||||
|
# - Mounts the filesystem as /nsm
|
||||||
|
#
|
||||||
|
# Exit Codes:
|
||||||
|
# 0: Success conditions:
|
||||||
|
# - Devices configured and mounted
|
||||||
|
# - Already properly mounted
|
||||||
|
# 1: Error conditions:
|
||||||
|
# - Must be run as root
|
||||||
|
# - No available NVMe devices found
|
||||||
|
# - Device has conflicting LVM configuration
|
||||||
|
# - Device preparation failed
|
||||||
|
# - LVM operation failed
|
||||||
|
# - Filesystem/mount operation failed
|
||||||
|
#
|
||||||
|
# Logging:
|
||||||
|
# - All operations logged to both console and /opt/so/log/so-nsm-mount.log
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
LOG_FILE="/opt/so/log/so-nsm-mount.log"
|
||||||
|
VG_NAME=""
|
||||||
|
LV_NAME="nsm"
|
||||||
|
MOUNT_POINT="/nsm"
|
||||||
|
|
||||||
|
# Function to log messages
|
||||||
|
log() {
|
||||||
|
echo "$(date '+%Y-%m-%d %H:%M:%S') $1" | tee -a "$LOG_FILE" >&2
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to log errors
|
||||||
|
log_error() {
|
||||||
|
echo "$(date '+%Y-%m-%d %H:%M:%S') ERROR: $1" | tee -a "$LOG_FILE" >&2
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to log command output
|
||||||
|
log_cmd() {
|
||||||
|
local cmd="$1"
|
||||||
|
local desc="$2"
|
||||||
|
local output
|
||||||
|
local ret=0
|
||||||
|
|
||||||
|
output=$(eval "$cmd" 2>&1) || ret=$?
|
||||||
|
|
||||||
|
if [ -n "$output" ]; then
|
||||||
|
log "$desc:"
|
||||||
|
printf '%s\n' "$output" | sed 's/^/ /' | while IFS= read -r line; do
|
||||||
|
log "$line"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
[ $ret -eq 0 ] || log_error "Command failed with exit code $ret: $cmd"
|
||||||
|
return $ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# Get system UUID for unique VG naming
|
||||||
|
get_system_uuid() {
|
||||||
|
local uuid
|
||||||
|
|
||||||
|
if ! uuid=$(dmidecode -s system-uuid 2>/dev/null); then
|
||||||
|
log_error "Failed to get system UUID"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Just convert hyphens to underscores
|
||||||
|
echo "${uuid//-/_}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Convert VG name back to UUID format
|
||||||
|
vg_name_to_uuid() {
|
||||||
|
local vg=$1
|
||||||
|
# Just convert underscores back to hyphens
|
||||||
|
echo "$vg" | tr '_' '-'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to perform secure erase of NVMe device
|
||||||
|
secure_erase_nvme() {
|
||||||
|
local device=$1
|
||||||
|
local ret=0
|
||||||
|
local retry=3
|
||||||
|
|
||||||
|
log "Performing secure erase of NVMe device $device"
|
||||||
|
|
||||||
|
if [[ ! "$device" =~ ^/dev/nvme[0-9]+n[0-9]+$ ]]; then
|
||||||
|
log_error "Device $device is not an NVMe device"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if device is mounted
|
||||||
|
if mountpoint -q "$device" || findmnt -n | grep -q "$device"; then
|
||||||
|
log_error "Device $device is mounted, cannot secure erase"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Attempt secure erase with retries
|
||||||
|
while [ $retry -gt 0 ]; do
|
||||||
|
log " Executing secure erase command (attempt $((4-retry))/3)"
|
||||||
|
if nvme format "$device" --namespace-id 1 --ses 1 --lbaf 0 --force 2>nvme.err; then
|
||||||
|
log " Success: Secure erase completed"
|
||||||
|
rm -f nvme.err
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check error type
|
||||||
|
if grep -q "Device or resource busy" nvme.err; then
|
||||||
|
log " Device busy, waiting before retry"
|
||||||
|
sleep 3
|
||||||
|
else
|
||||||
|
log_error "Secure erase failed"
|
||||||
|
log " Details: $(cat nvme.err)"
|
||||||
|
rm -f nvme.err
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
retry=$((retry - 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
log_error "Failed to secure erase device after 3 attempts"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check if running as root
|
||||||
|
check_root() {
|
||||||
|
if [ "$EUID" -ne 0 ]; then
|
||||||
|
log_error "Failed to execute - script must be run as root"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check LVM configuration of a device
|
||||||
|
check_lvm_config() {
|
||||||
|
local device=$1
|
||||||
|
local vg_name
|
||||||
|
local lv_name
|
||||||
|
|
||||||
|
log "Checking LVM configuration for $device"
|
||||||
|
|
||||||
|
# Log device details
|
||||||
|
log_cmd "lsblk -o NAME,SIZE,TYPE,MOUNTPOINT $device" "Device details"
|
||||||
|
|
||||||
|
# Check if device is a PV
|
||||||
|
if ! pvs "$device" &>/dev/null; then
|
||||||
|
log "Device is not a physical volume"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Log PV details
|
||||||
|
log_cmd "pvs --noheadings -o pv_name,vg_name,pv_size,pv_used $device" "Physical volume details"
|
||||||
|
|
||||||
|
# Get VG name if any
|
||||||
|
vg_name=$(pvs --noheadings -o vg_name "$device" | tr -d ' ')
|
||||||
|
if [ -z "$vg_name" ]; then
|
||||||
|
log "Device is not part of any volume group"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Safety check - never touch system VGs
|
||||||
|
if is_system_vg "$vg_name"; then
|
||||||
|
log_error "Device $device is part of system VG: $vg_name"
|
||||||
|
log "Cannot modify system volume groups. Aborting."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Log VG details
|
||||||
|
log_cmd "vgs --noheadings -o vg_name,vg_size,vg_free,pv_count $vg_name" "Volume group details"
|
||||||
|
|
||||||
|
# If it's our expected configuration
|
||||||
|
if [ "$vg_name" = "$VG_NAME" ]; then
|
||||||
|
if lvs "$VG_NAME/$LV_NAME" &>/dev/null; then
|
||||||
|
# Log LV details
|
||||||
|
log_cmd "lvs --noheadings -o lv_name,lv_size,lv_path $VG_NAME/$LV_NAME" "Logical volume details"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Get all LVs in the VG
|
||||||
|
local lvs_in_vg=$(lvs --noheadings -o lv_name "$vg_name" 2>/dev/null | tr '\n' ',' | sed 's/,$//')
|
||||||
|
|
||||||
|
log_error "Device $device is part of existing LVM configuration:"
|
||||||
|
log " Volume Group: $vg_name"
|
||||||
|
log " Logical Volumes: ${lvs_in_vg:-none}"
|
||||||
|
log ""
|
||||||
|
log "To preserve data safety, no changes will be made."
|
||||||
|
log ""
|
||||||
|
log "If you want to repurpose this device for /nsm, verify it's safe to proceed:"
|
||||||
|
log "1. Check current usage: lsblk $device"
|
||||||
|
log "2. Then run this command to clean up (CAUTION: THIS WILL DESTROY ALL DATA):"
|
||||||
|
log " umount $MOUNT_POINT 2>/dev/null; "
|
||||||
|
for lv in $(echo "$lvs_in_vg" | tr ',' ' '); do
|
||||||
|
log " lvremove -f /dev/$vg_name/$lv; "
|
||||||
|
done
|
||||||
|
log " vgreduce $vg_name $device && pvremove -ff -y $device && wipefs -a $device"
|
||||||
|
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check if VG is system critical
|
||||||
|
is_system_vg() {
|
||||||
|
local vg=$1
|
||||||
|
local root_dev
|
||||||
|
local root_vg
|
||||||
|
local mp
|
||||||
|
local dev
|
||||||
|
|
||||||
|
# First check if it's the current root VG
|
||||||
|
root_dev=$(findmnt -n -o SOURCE /)
|
||||||
|
if [ -n "$root_dev" ]; then
|
||||||
|
# Get VG name from root device
|
||||||
|
if lvs --noheadings -o vg_name "$root_dev" 2>/dev/null | grep -q "^$vg$"; then
|
||||||
|
return 0 # true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check all mounted LVM devices
|
||||||
|
while read -r mp; do
|
||||||
|
# Skip our NSM mount
|
||||||
|
[ "$mp" = "$MOUNT_POINT" ] && continue
|
||||||
|
|
||||||
|
# Check if mount uses this VG
|
||||||
|
if lvs --noheadings -o vg_name "$mp" 2>/dev/null | grep -q "^$vg$"; then
|
||||||
|
return 0 # true
|
||||||
|
fi
|
||||||
|
done < <(findmnt -n -o SOURCE -t ext4,xfs,btrfs,swap | grep "/dev/mapper/")
|
||||||
|
|
||||||
|
# Check if VG contains any mounted devices
|
||||||
|
while read -r dev; do
|
||||||
|
if [ -n "$dev" ] && findmnt -n | grep -q "$dev"; then
|
||||||
|
return 0 # true
|
||||||
|
fi
|
||||||
|
done < <(lvs "/dev/$vg" --noheadings -o lv_path 2>/dev/null)
|
||||||
|
|
||||||
|
# Check if VG contains critical LV names
|
||||||
|
if lvs "/dev/$vg" &>/dev/null; then
|
||||||
|
if lvs --noheadings -o lv_name "/dev/$vg" 2>/dev/null | grep -qE '^(root|swap|home|var|usr|tmp|opt|srv|boot)$'; then
|
||||||
|
return 0 # true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if VG has common system names
|
||||||
|
if [[ "$vg" =~ ^(vg_main|system|root|os|rhel|centos|ubuntu|debian|fedora)$ ]]; then
|
||||||
|
return 0 # true
|
||||||
|
fi
|
||||||
|
|
||||||
|
return 1 # false
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to deactivate LVM on device
|
||||||
|
deactivate_lvm() {
|
||||||
|
local device=$1
|
||||||
|
local vg=$2
|
||||||
|
|
||||||
|
# Safety check - never touch system VGs
|
||||||
|
if is_system_vg "$vg"; then
|
||||||
|
log_error "Refusing to deactivate system VG: $vg"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log " Deactivating LVM on device $device (VG: $vg)"
|
||||||
|
|
||||||
|
# Get list of LVs that specifically use this device
|
||||||
|
local lvs_to_deactivate
|
||||||
|
lvs_to_deactivate=$(pvs --noheadings -o vg_name,lv_name "$device" 2>/dev/null | awk '{print $1"/"$2}')
|
||||||
|
|
||||||
|
# Deactivate only LVs that use this device
|
||||||
|
if [ -n "$lvs_to_deactivate" ]; then
|
||||||
|
log " Deactivating logical volumes on device"
|
||||||
|
while read -r lv; do
|
||||||
|
if [ -n "$lv" ]; then
|
||||||
|
log " Deactivating: $lv"
|
||||||
|
if ! lvchange -an "/dev/$lv" 2>/dev/null; then
|
||||||
|
log " WARNING: Failed to deactivate $lv"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done <<< "$lvs_to_deactivate"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# No need to attempt VG removal - secure erase will handle it
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to cleanup device
|
||||||
|
cleanup_device() {
|
||||||
|
local device=$1
|
||||||
|
local ret=0
|
||||||
|
|
||||||
|
log "Cleaning up device $device"
|
||||||
|
|
||||||
|
# Check if device belongs to current system
|
||||||
|
if pvs "$device" &>/dev/null; then
|
||||||
|
local vg=$(pvs --noheadings -o vg_name "$device" | tr -d ' ')
|
||||||
|
local current_vg=$(get_system_uuid)
|
||||||
|
local vg_uuid=""
|
||||||
|
local current_uuid=""
|
||||||
|
|
||||||
|
if [[ -n "$vg" ]]; then
|
||||||
|
# Convert VG names to UUIDs for comparison
|
||||||
|
vg_uuid=$(vg_name_to_uuid "$vg")
|
||||||
|
current_uuid=$(vg_name_to_uuid "$current_vg")
|
||||||
|
|
||||||
|
if [[ "$vg_uuid" == "$current_uuid" ]]; then
|
||||||
|
log " Device belongs to current system, skipping secure erase"
|
||||||
|
else
|
||||||
|
log " Device belongs to different system (VG: $vg)"
|
||||||
|
|
||||||
|
# First deactivate LVM
|
||||||
|
if ! deactivate_lvm "$device" "$vg"; then
|
||||||
|
log_error "Failed to fully deactivate LVM"
|
||||||
|
ret=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Attempt secure erase even if LVM cleanup had issues
|
||||||
|
log " Performing secure erase"
|
||||||
|
if ! secure_erase_nvme "$device"; then
|
||||||
|
log_error "Failed to secure erase device"
|
||||||
|
ret=1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# No LVM configuration found, perform secure erase
|
||||||
|
log " No LVM configuration found, performing secure erase"
|
||||||
|
if ! secure_erase_nvme "$device"; then
|
||||||
|
log_error "Failed to secure erase device"
|
||||||
|
ret=1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Always attempt to remove partitions and signatures
|
||||||
|
log " Removing partitions and signatures"
|
||||||
|
if ! wipefs -a "$device" 2>/dev/null; then
|
||||||
|
log_error "Failed to remove signatures"
|
||||||
|
ret=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $ret -eq 0 ]; then
|
||||||
|
log " Device cleanup successful"
|
||||||
|
else
|
||||||
|
log_error "Device cleanup had some issues"
|
||||||
|
fi
|
||||||
|
return $ret
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to validate device state
|
||||||
|
validate_device_state() {
|
||||||
|
local device=$1
|
||||||
|
|
||||||
|
if [[ ! -b "$device" ]]; then
|
||||||
|
log_error "$device is not a valid block device"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if device is already properly configured
|
||||||
|
if pvs "$device" &>/dev/null; then
|
||||||
|
local vg=$(pvs --noheadings -o vg_name "$device" | tr -d ' ')
|
||||||
|
|
||||||
|
# Safety check - never touch system VGs
|
||||||
|
if is_system_vg "$vg"; then
|
||||||
|
log_error "Device $device is part of system VG: $vg"
|
||||||
|
log "Cannot modify system volume groups. Aborting."
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Convert VG names to UUIDs for comparison
|
||||||
|
local vg_uuid=$(vg_name_to_uuid "$vg")
|
||||||
|
local current_uuid=$(vg_name_to_uuid "$VG_NAME")
|
||||||
|
|
||||||
|
if [[ "$vg_uuid" == "$current_uuid" ]]; then
|
||||||
|
if lvs "$vg/$LV_NAME" &>/dev/null; then
|
||||||
|
log "Device $device is already properly configured in VG $vg"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for existing partitions or LVM
|
||||||
|
if pvs "$device" &>/dev/null || lsblk -no TYPE "$device" | grep -q "part"; then
|
||||||
|
# Check if device is mounted as root filesystem
|
||||||
|
if mountpoint -q / && findmnt -n -o SOURCE / | grep -q "$device"; then
|
||||||
|
log_error "Device $device contains root filesystem. Aborting."
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Device $device has existing configuration"
|
||||||
|
if ! cleanup_device "$device"; then
|
||||||
|
log "Failed to cleanup device $device"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to log device details
|
||||||
|
log_device_details() {
|
||||||
|
local device=$1
|
||||||
|
local size mount fs_type vg_name
|
||||||
|
|
||||||
|
size=$(lsblk -dbn -o SIZE "$device" 2>/dev/null | numfmt --to=iec)
|
||||||
|
mount=$(lsblk -no MOUNTPOINT "$device" 2>/dev/null)
|
||||||
|
fs_type=$(lsblk -no FSTYPE "$device" 2>/dev/null)
|
||||||
|
|
||||||
|
log "Device details for $device:"
|
||||||
|
log " Size: $size"
|
||||||
|
log " Filesystem: ${fs_type:-none}"
|
||||||
|
log " Mountpoint: ${mount:-none}"
|
||||||
|
|
||||||
|
if pvs "$device" &>/dev/null; then
|
||||||
|
vg_name=$(pvs --noheadings -o vg_name "$device" | tr -d ' ')
|
||||||
|
log " LVM status: Physical volume in VG ${vg_name:-none}"
|
||||||
|
else
|
||||||
|
log " LVM status: Not a physical volume"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to detect NVMe devices
|
||||||
|
detect_nvme_devices() {
|
||||||
|
local -a devices=()
|
||||||
|
local -a available_devices=()
|
||||||
|
local -a configured_devices=()
|
||||||
|
|
||||||
|
{
|
||||||
|
log "----------------------------------------"
|
||||||
|
log "Starting NVMe device detection"
|
||||||
|
log "----------------------------------------"
|
||||||
|
|
||||||
|
# First get a clean list of devices
|
||||||
|
while read -r dev; do
|
||||||
|
if [[ -b "$dev" ]]; then
|
||||||
|
devices+=("$dev")
|
||||||
|
fi
|
||||||
|
done < <(find /dev -name 'nvme*n1' 2>/dev/null)
|
||||||
|
|
||||||
|
if [ ${#devices[@]} -eq 0 ]; then
|
||||||
|
log_error "No NVMe devices found"
|
||||||
|
log "----------------------------------------"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Found ${#devices[@]} NVMe device(s)"
|
||||||
|
|
||||||
|
# Process and validate each device
|
||||||
|
for dev in "${devices[@]}"; do
|
||||||
|
log_device_details "$dev"
|
||||||
|
|
||||||
|
if validate_device_state "$dev"; then
|
||||||
|
if pvs "$dev" &>/dev/null; then
|
||||||
|
local vg=$(pvs --noheadings -o vg_name "$dev" | tr -d ' ')
|
||||||
|
local vg_uuid=$(vg_name_to_uuid "$vg")
|
||||||
|
local current_uuid=$(vg_name_to_uuid "$VG_NAME")
|
||||||
|
|
||||||
|
if [[ "$vg_uuid" == "$current_uuid" ]]; then
|
||||||
|
configured_devices+=("$dev")
|
||||||
|
log "Status: Already configured in VG $vg"
|
||||||
|
else
|
||||||
|
available_devices+=("$dev")
|
||||||
|
log "Status: Available for use"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
available_devices+=("$dev")
|
||||||
|
log "Status: Available for use"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log "Status: Not available (see previous messages)"
|
||||||
|
fi
|
||||||
|
log "----------------------------------------"
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ ${#configured_devices[@]} -gt 0 ]; then
|
||||||
|
log "Found ${#configured_devices[@]} device(s) already configured:"
|
||||||
|
for dev in "${configured_devices[@]}"; do
|
||||||
|
local size=$(lsblk -dbn -o SIZE "$dev" 2>/dev/null | numfmt --to=iec)
|
||||||
|
log " - $dev ($size)"
|
||||||
|
done
|
||||||
|
log "Proceeding with mount setup"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ${#available_devices[@]} -eq 0 ]; then
|
||||||
|
log_error "No available NVMe devices found"
|
||||||
|
log "----------------------------------------"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Summary: ${#available_devices[@]} device(s) available for use"
|
||||||
|
for dev in "${available_devices[@]}"; do
|
||||||
|
local size=$(lsblk -dbn -o SIZE "$dev" 2>/dev/null | numfmt --to=iec)
|
||||||
|
log " - $dev ($size)"
|
||||||
|
done
|
||||||
|
log "----------------------------------------"
|
||||||
|
} >&2
|
||||||
|
|
||||||
|
# Return array elements one per line
|
||||||
|
printf '%s\n' "${available_devices[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to prepare devices for LVM
|
||||||
|
prepare_devices() {
|
||||||
|
local -a devices=("$@")
|
||||||
|
local -a prepared_devices=()
|
||||||
|
|
||||||
|
{
|
||||||
|
log "----------------------------------------"
|
||||||
|
log "Starting device preparation"
|
||||||
|
log "----------------------------------------"
|
||||||
|
|
||||||
|
for device in "${devices[@]}"; do
|
||||||
|
if [[ ! -b "$device" ]]; then
|
||||||
|
log_error "Invalid device path: $device"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Processing device: $device"
|
||||||
|
log_device_details "$device"
|
||||||
|
|
||||||
|
# Check if device needs preparation
|
||||||
|
if ! validate_device_state "$device"; then
|
||||||
|
log "Skipping device $device - invalid state"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Preparing device for LVM use:"
|
||||||
|
|
||||||
|
# Clean existing signatures
|
||||||
|
log " Step 1: Cleaning existing signatures"
|
||||||
|
if ! wipefs -a "$device" 2>wipefs.err; then
|
||||||
|
log_error "Failed to clean signatures"
|
||||||
|
log " Details: $(cat wipefs.err)"
|
||||||
|
rm -f wipefs.err
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
rm -f wipefs.err
|
||||||
|
log " Success: Signatures cleaned"
|
||||||
|
|
||||||
|
# Create physical volume
|
||||||
|
log " Step 2: Creating physical volume"
|
||||||
|
if ! pvcreate -ff -y "$device" 2>pv.err; then
|
||||||
|
log_error "Physical volume creation failed"
|
||||||
|
log " Details: $(cat pv.err)"
|
||||||
|
rm -f pv.err
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
rm -f pv.err
|
||||||
|
|
||||||
|
# Log success and add to prepared devices
|
||||||
|
size=$(lsblk -dbn -o SIZE "$device" | numfmt --to=iec)
|
||||||
|
log " Success: Created physical volume"
|
||||||
|
log " Device: $device"
|
||||||
|
log " Size: $size"
|
||||||
|
log_cmd "pvs --noheadings -o pv_name,vg_name,pv_size,pv_used $device" "Physical volume details"
|
||||||
|
|
||||||
|
prepared_devices+=("$device")
|
||||||
|
log "----------------------------------------"
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ ${#prepared_devices[@]} -eq 0 ]; then
|
||||||
|
log_error "No devices were successfully prepared"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
} >&2
|
||||||
|
|
||||||
|
printf '%s\n' "${prepared_devices[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to wait for device
# Block until the given device node appears, then prod udev and LVM so the
# node is fully usable. Returns 0 once the device exists, 1 if it never
# shows up within the timeout. The udev/vgscan refreshes are best-effort:
# failures there are logged as warnings but never fatal.
wait_for_device() {
    local device="$1"
    local timeout=10
    local count=0

    log "Waiting for device $device to be available"
    # Poll once per second until the node exists or we run out of attempts.
    until [ -e "$device" ]; do
        [ $count -ge $timeout ] && break
        sleep 1
        count=$((count + 1))
        log " Attempt $count/$timeout"
    done

    if [ ! -e "$device" ]; then
        log_error "Device $device did not appear after $timeout seconds"
        return 1
    fi

    # Ask udev to (re)process the device so its nodes/symlinks are created.
    log " Running udevadm trigger"
    udevadm trigger "$device" 2>/dev/null \
        || log " WARNING: udevadm trigger failed, continuing anyway"

    # Give udev a moment to create device nodes
    sleep 1

    # Wait for the udev event queue to drain before relying on the node.
    log " Waiting for udev to settle"
    udevadm settle 2>/dev/null \
        || log " WARNING: udevadm settle failed, continuing anyway"

    # Make sure LVM has scanned the device and created mapper nodes.
    log " Running vgscan"
    vgscan --mknodes 2>/dev/null \
        || log " WARNING: vgscan failed, continuing anyway"

    log " Device $device is now available"
    return 0
}
|
||||||
|
|
||||||
|
# Function to setup LVM
# Create the volume group $VG_NAME from the supplied devices (or extend it
# if it already exists) and carve a single logical volume $LV_NAME out of
# all free space. Exits the script (exit 1) on any LVM failure.
#
# Arguments: one or more prepared physical-volume device paths.
setup_lvm() {
    local -a devices=("$@")
    # Capture stderr in a private temp file instead of fixed names like
    # vg.err/lv.err in the current working directory, which could clobber
    # unrelated files and leak on unexpected exits.
    local errfile
    errfile=$(mktemp) || { log_error "Unable to create temporary file"; exit 1; }

    log "----------------------------------------"
    log "Starting LVM configuration"
    log "----------------------------------------"

    # Log initial LVM state
    log "Initial LVM state:"
    log_cmd "pvs" "Physical volumes"
    log_cmd "vgs" "Volume groups"
    log_cmd "lvs" "Logical volumes"

    # Create or extend volume group
    if vgs "$VG_NAME" &>/dev/null; then
        log "Step 1: Extending existing volume group"
        log " Target VG: $VG_NAME"
        log " Devices to add: ${devices[*]}"

        # Extend existing VG
        if ! vgextend "$VG_NAME" "${devices[@]}" 2>"$errfile"; then
            log_error "Volume group extension failed"
            log " Details: $(cat "$errfile")"
            rm -f "$errfile"
            exit 1
        fi

        size=$(vgs --noheadings -o vg_size --units h "$VG_NAME" | tr -d ' ')
        log " Success: Extended volume group"
        log " Name: $VG_NAME"
        log " Total size: $size"
    else
        log "Step 1: Creating new volume group"
        log " Name: $VG_NAME"
        log " Devices: ${devices[*]}"

        # Create new VG
        if ! vgcreate "$VG_NAME" "${devices[@]}" 2>"$errfile"; then
            log_error "Volume group creation failed"
            log " Details: $(cat "$errfile")"
            rm -f "$errfile"
            exit 1
        fi

        size=$(vgs --noheadings -o vg_size --units h "$VG_NAME" | tr -d ' ')
        log " Success: Created volume group"
        log " Name: $VG_NAME"
        log " Size: $size"
    fi

    log_cmd "vgs $VG_NAME" "Volume group details"

    # Create logical volume using all available space
    if ! lvs "$VG_NAME/$LV_NAME" &>/dev/null; then
        log "Step 2: Creating logical volume"
        log " Name: $LV_NAME"
        log " Size: 100% of free space"

        # -y answers lvcreate's wipe-signature prompts non-interactively
        if ! lvcreate -l 100%FREE -n "$LV_NAME" "$VG_NAME" -y 2>"$errfile"; then
            log_error "Logical volume creation failed"
            log " Details: $(cat "$errfile")"
            rm -f "$errfile"
            exit 1
        fi

        size=$(lvs --noheadings -o lv_size --units h "$VG_NAME/$LV_NAME" | tr -d ' ')
        log " Success: Created logical volume"
        log " Name: $LV_NAME"
        log " Size: $size"
        log_cmd "lvs $VG_NAME/$LV_NAME" "Logical volume details"
    else
        log "Step 2: Logical volume already exists"
        log_cmd "lvs $VG_NAME/$LV_NAME" "Existing logical volume details"
    fi

    rm -f "$errfile"
    log "----------------------------------------"
}
|
||||||
|
|
||||||
|
# Function to create and mount filesystem
# Wait for /dev/$VG_NAME/$LV_NAME to exist, put an XFS filesystem on it if
# one is not already present, ensure an /etc/fstab entry, and mount it at
# $MOUNT_POINT. Exits the script (exit 1) on any fatal failure.
setup_filesystem() {
    local device="/dev/$VG_NAME/$LV_NAME"

    log "----------------------------------------"
    log "Starting filesystem setup"
    log "----------------------------------------"

    log "Step 1: Checking device status"
    log " Device path: $device"

    # Wait for device to be available
    if ! wait_for_device "$device"; then
        exit 1
    fi

    # Check for existing /nsm directory; mounting over a non-empty directory
    # hides (but does not delete) its contents, so only warn.
    if [[ -d "$MOUNT_POINT" ]]; then
        log "WARNING: $MOUNT_POINT directory already exists"
        if [[ -n "$(ls -A "$MOUNT_POINT")" ]]; then
            log "WARNING: $MOUNT_POINT is not empty"
            log "Contents will be hidden when mounted"
        fi
    fi

    # Check filesystem type - don't fail if blkid fails (blkid returns
    # non-zero for a blank device, hence the "none" fallback).
    local fs_type
    fs_type=$(blkid -o value -s TYPE "$device" 2>/dev/null || echo "none")
    log " Current filesystem type: ${fs_type:-none}"

    # Create XFS filesystem if needed
    log "Step 2: Filesystem preparation"
    if [[ "$fs_type" != "xfs" ]]; then
        log " Creating new XFS filesystem:"
        log " Device: $device"
        log " Options: -f (force)"

        # Clean any existing signatures first
        wipefs -a "$device" 2>/dev/null || true

        # Create filesystem with force flag; -K skips discard of blocks,
        # -q suppresses mkfs output
        if ! mkfs.xfs -f "$device" -K -q 2>mkfs.err; then
            log_error "XFS filesystem creation failed"
            log " Details: $(cat mkfs.err)"
            rm -f mkfs.err
            exit 1
        fi
        rm -f mkfs.err

        size=$(lvs --noheadings -o lv_size --units h "$VG_NAME/$LV_NAME" | tr -d ' ')
        log " Success: Created XFS filesystem"
        log " Device: $device"
        log " Size: $size"

        # Verify filesystem was created by re-probing the signature
        fs_type=$(blkid -o value -s TYPE "$device")
        if [[ "$fs_type" != "xfs" ]]; then
            log_error "Failed to verify XFS filesystem creation"
            exit 1
        fi
        log " Verified XFS filesystem"
    else
        log " XFS filesystem already exists"
    fi

    # Create mount point
    log "Step 3: Mount point preparation"
    if [[ ! -d "$MOUNT_POINT" ]]; then
        log " Creating mount point directory: $MOUNT_POINT"
        mkdir -p "$MOUNT_POINT"
        log " Success: Directory created"
    else
        log " Mount point already exists: $MOUNT_POINT"
    fi

    # Update fstab if needed
    log "Step 4: Configuring persistent mount"
    log " Checking current fstab entries:"
    # Temporarily disable exit on error for fstab operations: grep exits
    # non-zero on no match, which would otherwise abort under set -e.
    set +e

    # Check fstab entries without failing on no match
    if ! grep -P "^/dev/$VG_NAME/$LV_NAME\\s" /etc/fstab >/dev/null 2>&1; then
        log " No existing fstab entry found"
    else
        log_cmd "grep -P '^/dev/$VG_NAME/$LV_NAME\\s' /etc/fstab" "Current configuration"
    fi

    # Check if we need to add fstab entry
    # NOTE(review): "$device" is used unescaped in the pattern, so "." in the
    # path matches any character — benign here, but verify if VG names change.
    if ! grep -q "^$device.*$MOUNT_POINT" /etc/fstab >/dev/null 2>&1; then
        # Re-enable exit on error for critical operations
        set -e
        log " Adding new fstab entry"
        local mount_options="rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota"
        echo "$device $MOUNT_POINT xfs $mount_options 0 0" >> /etc/fstab
        log " Success: Added entry"
        log " Device: $device"
        log " Mount point: $MOUNT_POINT"
        log " Options: $mount_options"
        log_cmd "grep -P \"^/dev/$VG_NAME/$LV_NAME\\s\" /etc/fstab" "New configuration"

        # Reload systemd to recognize new fstab entry (systemd generates
        # mount units from fstab)
        log " Reloading systemd to recognize new fstab entry"
        if ! systemctl daemon-reload; then
            log " WARNING: Failed to reload systemd, continuing anyway"
        fi
    else
        log " Existing fstab entry found"
        # Re-enable exit on error
        set -e
    fi

    # Mount the filesystem
    log "Step 5: Mounting filesystem"
    if ! mountpoint -q "$MOUNT_POINT"; then
        log " Mounting $device to $MOUNT_POINT"
        # Mount by mount point so options come from the fstab entry above
        if ! mount "$MOUNT_POINT" 2>mount.err; then
            log_error "Mount operation failed"
            log " Details: $(cat mount.err)"
            rm -f mount.err
            exit 1
        fi
        rm -f mount.err

        size=$(df -h "$MOUNT_POINT" | awk 'NR==2 {print $2}')
        log " Success: Filesystem mounted"
        log " Device: $device"
        log " Mount point: $MOUNT_POINT"
        log " Size: $size"
        log_cmd "df -h $MOUNT_POINT" "Mount details"
    else
        log " Filesystem already mounted"
        log_cmd "df -h $MOUNT_POINT" "Current mount details"
    fi

    log "----------------------------------------"
}
|
||||||
|
|
||||||
|
# Main function
# Orchestrate the full /nsm storage bring-up: root check, VG naming from the
# system UUID, NVMe detection/preparation, LVM setup, and filesystem mount.
# Exits 0 early when /nsm is already mounted; exits 1 on fatal errors.
main() {
    check_root

    # Set VG_NAME based on system UUID
    VG_NAME=$(get_system_uuid)

    # Nothing to do when the target is already mounted.
    if mountpoint -q "$MOUNT_POINT"; then
        size=$(df -h "$MOUNT_POINT" | awk 'NR==2 {print $2}')
        log "$MOUNT_POINT already mounted (size: $size)"
        log_cmd "df -h $MOUNT_POINT" "Current mount details"
        exit 0
    fi

    # Log initial system state with context
    log "----------------------------------------"
    log "Checking initial system state"
    log "NOTE: If any drives were previously used in another system, you may see"
    log " warnings about missing devices or volume groups below. These warnings"
    log " are normal and expected when reusing drives. They indicate the drive"
    log " was part of a previous system's configuration and will be automatically"
    log " cleaned up in the following steps."
    log "----------------------------------------"

    # Snapshot of block-device and LVM state before any changes.
    local snapshot
    for snapshot in \
        "lsblk:Block devices" \
        "pvs:Physical volumes" \
        "vgs:Volume groups" \
        "lvs:Logical volumes"
    do
        log_cmd "${snapshot%%:*}" "${snapshot#*:}"
    done

    log "----------------------------------------"
    log "Proceeding with cleanup of any previous configurations and setup for /nsm"
    log "----------------------------------------"

    # Skip device preparation entirely when LVM is already configured.
    if vgs "$VG_NAME" &>/dev/null && lvs "$VG_NAME/$LV_NAME" &>/dev/null; then
        log "Found existing LVM configuration"
        log "Proceeding with filesystem setup"
    else
        # Find candidate NVMe devices.
        local -a devices=()
        mapfile -t devices < <(detect_nvme_devices)

        if [ ${#devices[@]} -eq 0 ]; then
            log_error "No NVMe devices found"
            exit 1
        fi

        # Wipe/pvcreate each candidate; collect the ones that succeeded.
        local -a prepared_devices=()
        mapfile -t prepared_devices < <(prepare_devices "${devices[@]}")

        if [ ${#prepared_devices[@]} -gt 0 ]; then
            # Build the VG/LV from the freshly prepared devices.
            setup_lvm "${prepared_devices[@]}"
        elif vgs "$VG_NAME" &>/dev/null && lvs "$VG_NAME/$LV_NAME" &>/dev/null; then
            # Preparation produced nothing, but a usable config exists.
            log "Devices already configured, proceeding with filesystem setup"
        else
            log_error "No devices were successfully prepared and no existing configuration found"
            exit 1
        fi
    fi

    # Create and mount filesystem
    setup_filesystem

    # Log final system state
    log "Final system state:"
    log_cmd "lsblk" "Block devices"
    log_cmd "pvs" "Physical volumes"
    log_cmd "vgs" "Volume groups"
    log_cmd "lvs" "Logical volumes"
    log_cmd "df -h $MOUNT_POINT" "Mount details"
}

# Run main function
main "$@"
|
||||||
7
salt/storage/init.sls
Normal file
7
salt/storage/init.sls
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Entry point for the storage states: applying "storage" pulls in the
# /nsm LVM + XFS mount states defined in storage/nsm_mount.sls.
include:
  - storage.nsm_mount
||||||
40
salt/storage/nsm_mount.sls
Normal file
40
salt/storage/nsm_mount.sls
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# States that install and run /usr/sbin/so-nsm-mount, the script that builds
# an LVM volume group + XFS filesystem from local devices and mounts it at
# /nsm. Execution is idempotent via the `unless` mountpoint check below.

# Install required packages (LVM tooling and mkfs.xfs used by the script)
storage_nsm_mount_packages:
  pkg.installed:
    - pkgs:
      - lvm2
      - xfsprogs

# Ensure log directory exists (the mount script writes its log here)
storage_nsm_mount_logdir:
  file.directory:
    - name: /opt/so/log
    - makedirs: True
    - user: root
    - group: root
    - mode: 755

# Install the NSM mount script
storage_nsm_mount_script:
  file.managed:
    - name: /usr/sbin/so-nsm-mount
    - source: salt://storage/files/so-nsm-mount
    - mode: 755
    - user: root
    - group: root
    - require:
      - pkg: storage_nsm_mount_packages
      - file: storage_nsm_mount_logdir

# Execute the mount script if not already mounted
storage_nsm_mount_execute:
  cmd.run:
    - name: /usr/sbin/so-nsm-mount
    # Skip when /nsm is already a mount point, keeping the state idempotent
    - unless: mountpoint -q /nsm
    - require:
      - file: storage_nsm_mount_script
|
||||||
@@ -48,6 +48,15 @@ telegraf:
|
|||||||
- redis.sh
|
- redis.sh
|
||||||
- sostatus.sh
|
- sostatus.sh
|
||||||
- features.sh
|
- features.sh
|
||||||
|
managerhype:
|
||||||
|
- agentstatus.sh
|
||||||
|
- influxdbsize.sh
|
||||||
|
- lasthighstate.sh
|
||||||
|
- os.sh
|
||||||
|
- raid.sh
|
||||||
|
- redis.sh
|
||||||
|
- sostatus.sh
|
||||||
|
- features.sh
|
||||||
managersearch:
|
managersearch:
|
||||||
- agentstatus.sh
|
- agentstatus.sh
|
||||||
- eps.sh
|
- eps.sh
|
||||||
@@ -110,6 +119,10 @@ telegraf:
|
|||||||
- lasthighstate.sh
|
- lasthighstate.sh
|
||||||
- os.sh
|
- os.sh
|
||||||
- sostatus.sh
|
- sostatus.sh
|
||||||
|
hypervisor:
|
||||||
|
- lasthighstate.sh
|
||||||
|
- os.sh
|
||||||
|
- sostatus.sh
|
||||||
desktop:
|
desktop:
|
||||||
- lasthighstate.sh
|
- lasthighstate.sh
|
||||||
- os.sh
|
- os.sh
|
||||||
|
|||||||
23
salt/top.sls
23
salt/top.sls
@@ -8,6 +8,11 @@
|
|||||||
{% set INSTALLEDSALTVERSION = grains.saltversion %}
|
{% set INSTALLEDSALTVERSION = grains.saltversion %}
|
||||||
|
|
||||||
base:
|
base:
|
||||||
|
'salt-cloud:driver:libvirt':
|
||||||
|
- match: grain
|
||||||
|
- storage
|
||||||
|
- vm.status
|
||||||
|
- vm.user
|
||||||
|
|
||||||
'*':
|
'*':
|
||||||
- cron.running
|
- cron.running
|
||||||
@@ -120,7 +125,7 @@ base:
|
|||||||
- stig
|
- stig
|
||||||
- kafka
|
- kafka
|
||||||
|
|
||||||
'*_manager and G@saltversion:{{saltversion}} and not I@node_data:False':
|
'*_manager or *_managerhype and G@saltversion:{{saltversion}} and not I@node_data:False':
|
||||||
- match: compound
|
- match: compound
|
||||||
- salt.master
|
- salt.master
|
||||||
- ca
|
- ca
|
||||||
@@ -151,6 +156,10 @@ base:
|
|||||||
- stig
|
- stig
|
||||||
- kafka
|
- kafka
|
||||||
|
|
||||||
|
'*_managerhype and I@features:vrt and G@saltversion:{{saltversion}}':
|
||||||
|
- match: compound
|
||||||
|
- manager.hypervisor
|
||||||
|
|
||||||
'*_managersearch and G@saltversion:{{saltversion}} and not I@node_data:False':
|
'*_managersearch and G@saltversion:{{saltversion}} and not I@node_data:False':
|
||||||
- match: compound
|
- match: compound
|
||||||
- salt.master
|
- salt.master
|
||||||
@@ -291,6 +300,18 @@ base:
|
|||||||
- elasticfleet.install_agent_grid
|
- elasticfleet.install_agent_grid
|
||||||
- schedule
|
- schedule
|
||||||
|
|
||||||
|
'*_hypervisor and I@features:vrt and G@saltversion:{{saltversion}}':
|
||||||
|
- match: compound
|
||||||
|
- ssl
|
||||||
|
- sensoroni
|
||||||
|
- telegraf
|
||||||
|
- firewall
|
||||||
|
- hypervisor
|
||||||
|
- libvirt
|
||||||
|
- libvirt.images
|
||||||
|
- elasticfleet.install_agent_grid
|
||||||
|
- stig
|
||||||
|
|
||||||
'*_desktop and G@saltversion:{{saltversion}}':
|
'*_desktop and G@saltversion:{{saltversion}}':
|
||||||
- ssl
|
- ssl
|
||||||
- sensoroni
|
- sensoroni
|
||||||
|
|||||||
@@ -38,6 +38,7 @@
|
|||||||
'so-import',
|
'so-import',
|
||||||
'so-manager',
|
'so-manager',
|
||||||
'so-managersearch',
|
'so-managersearch',
|
||||||
|
'so-managerhype',
|
||||||
'so-standalone'
|
'so-standalone'
|
||||||
],
|
],
|
||||||
'sensor_roles': [
|
'sensor_roles': [
|
||||||
|
|||||||
12
salt/vars/hypervisor.map.jinja
Normal file
12
salt/vars/hypervisor.map.jinja
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{# Derive role-level globals for hypervisor nodes. node_ip prefers the br0
   bridge address, falls back to the address of the management interface
   named in pillar host:mainint, and finally to 127.0.0.1. #}
{% import 'vars/init.map.jinja' as INIT %}
{# True when grains report a br0 interface with at least one address #}
{%
  set has_br0 = INIT.GRAINS is defined and INIT.GRAINS.ip_interfaces is defined and 'br0' in INIT.GRAINS.ip_interfaces and INIT.GRAINS.ip_interfaces.br0 is defined and INIT.GRAINS.ip_interfaces.br0|length > 0 %}
{# True when pillar declares the main management interface name #}
{%
  set has_mainint = INIT.PILLAR is defined and INIT.PILLAR.host is defined and INIT.PILLAR.host.mainint is defined %}
{# NOTE(review): this lookup assumes INIT.GRAINS.ip_interfaces exists whenever
   has_mainint is true; unlike has_br0 it is not guarded — confirm it cannot
   render on a minion with pillar set but grains missing #}
{%
  set fallback_ip = INIT.GRAINS.ip_interfaces.get(INIT.PILLAR.host.mainint, ['127.0.0.1'])[0] if has_mainint else '127.0.0.1' %}
{%
  set ROLE_GLOBALS = {
    'node_ip': INIT.GRAINS.ip_interfaces.get('br0', [fallback_ip])[0] if has_br0 else fallback_ip
  }
%}
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user