Merge pull request #14899 from Security-Onion-Solutions/vlb2

Handle '-' in hypervisor hostname
This commit is contained in:
Josh Patterson
2025-08-04 17:50:41 -04:00
committed by GitHub
6 changed files with 21 additions and 21 deletions

View File

@@ -77,10 +77,10 @@ Examples:
1. Static IP Configuration with Multiple PCI Devices: 1. Static IP Configuration with Multiple PCI Devices:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 \ so-salt-cloud -p sool9_hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 \
--dns4 192.168.1.1,192.168.1.2 --search4 example.local -c 4 -m 8192 -P 0000:c7:00.0 -P 0000:c4:00.0 --dns4 192.168.1.1,192.168.1.2 --search4 example.local -c 4 -m 8192 -P 0000:c7:00.0 -P 0000:c4:00.0
This command provisions a VM named vm1_sensor using the sool9-hyper1 profile with the following settings: This command provisions a VM named vm1_sensor using the sool9_hyper1 profile with the following settings:
- Static IPv4 configuration: - Static IPv4 configuration:
- IP Address: 192.168.1.10/24 - IP Address: 192.168.1.10/24
@@ -95,21 +95,21 @@ Examples:
2. DHCP Configuration with Default Hardware Settings: 2. DHCP Configuration with Default Hardware Settings:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm2_master --dhcp4 so-salt-cloud -p sool9_hyper1 vm2_master --dhcp4
This command provisions a VM named vm2_master using the sool9-hyper1 profile with DHCP for network configuration and default hardware settings. This command provisions a VM named vm2_master using the sool9_hyper1 profile with DHCP for network configuration and default hardware settings.
3. Static IP Configuration without Hardware Specifications: 3. Static IP Configuration without Hardware Specifications:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm3_search --static4 --ip4 192.168.1.20/24 --gw4 192.168.1.1 so-salt-cloud -p sool9_hyper1 vm3_search --static4 --ip4 192.168.1.20/24 --gw4 192.168.1.1
This command provisions a VM named vm3_search with a static IP configuration and default hardware settings. This command provisions a VM named vm3_search with a static IP configuration and default hardware settings.
4. DHCP Configuration with Custom Hardware Specifications and Multiple PCI Devices: 4. DHCP Configuration with Custom Hardware Specifications and Multiple PCI Devices:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm4_node --dhcp4 -c 8 -m 16384 -P 0000:c7:00.0 -P 0000:c4:00.0 -P 0000:c4:00.1 so-salt-cloud -p sool9_hyper1 vm4_node --dhcp4 -c 8 -m 16384 -P 0000:c7:00.0 -P 0000:c4:00.0 -P 0000:c4:00.1
This command provisions a VM named vm4_node using DHCP for network configuration and custom hardware settings: This command provisions a VM named vm4_node using DHCP for network configuration and custom hardware settings:
@@ -120,9 +120,9 @@ Examples:
5. Static IP Configuration with DNS and Search Domain: 5. Static IP Configuration with DNS and Search Domain:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 --dns4 192.168.1.1 --search4 example.local so-salt-cloud -p sool9_hyper1 vm1_sensor --static4 --ip4 192.168.1.10/24 --gw4 192.168.1.1 --dns4 192.168.1.1 --search4 example.local
This command provisions a VM named vm1_sensor using the sool9-hyper1 profile with static IPv4 configuration: This command provisions a VM named vm1_sensor using the sool9_hyper1 profile with static IPv4 configuration:
- Static IPv4 configuration: - Static IPv4 configuration:
- IP Address: 192.168.1.10/24 - IP Address: 192.168.1.10/24
@@ -133,14 +133,14 @@ Examples:
6. Delete a VM with Confirmation: 6. Delete a VM with Confirmation:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm1_sensor -d so-salt-cloud -p sool9_hyper1 vm1_sensor -d
This command deletes the VM named vm1_sensor and will prompt for confirmation before proceeding. This command deletes the VM named vm1_sensor and will prompt for confirmation before proceeding.
7. Delete a VM without Confirmation: 7. Delete a VM without Confirmation:
Command: Command:
so-salt-cloud -p sool9-hyper1 vm1_sensor -yd so-salt-cloud -p sool9_hyper1 vm1_sensor -yd
This command deletes the VM named vm1_sensor without prompting for confirmation. This command deletes the VM named vm1_sensor without prompting for confirmation.
@@ -439,8 +439,8 @@ def call_salt_cloud(profile, vm_name, destroy=False, assume_yes=False):
delete_vm(profile, vm_name, assume_yes) delete_vm(profile, vm_name, assume_yes)
return return
# Extract hypervisor hostname from profile (e.g., sool9-jpphype1 -> jpphype1) # Extract hypervisor hostname from profile (e.g., sool9_hype1 -> hype1)
hypervisor = profile.split('-', 1)[1] if '-' in profile else None hypervisor = profile.split('_', 1)[1] if '_' in profile else None
if hypervisor: if hypervisor:
logger.info("Ensuring host key exists for hypervisor %s", hypervisor) logger.info("Ensuring host key exists for hypervisor %s", hypervisor)
if not _add_hypervisor_host_key(hypervisor): if not _add_hypervisor_host_key(hypervisor):
@@ -512,7 +512,7 @@ def format_qcow2_output(operation, result):
logger.info(f"{operation} result from {host}: {host_result}") logger.info(f"{operation} result from {host}: {host_result}")
def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pci_list=None, start=False): def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pci_list=None, start=False):
hv_name = profile.split('-')[1] hv_name = profile.split('_')[1]
target = hv_name + "_*" target = hv_name + "_*"
try: try:
@@ -534,7 +534,7 @@ def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pc
logger.error(f"An error occurred while running qcow2.modify_hardware_config: {e}") logger.error(f"An error occurred while running qcow2.modify_hardware_config: {e}")
def run_qcow2_modify_network_config(profile, vm_name, mode, ip=None, gateway=None, dns=None, search_domain=None): def run_qcow2_modify_network_config(profile, vm_name, mode, ip=None, gateway=None, dns=None, search_domain=None):
hv_name = profile.split('-')[1] hv_name = profile.split('_')[1]
target = hv_name + "_*" target = hv_name + "_*"
image = '/nsm/libvirt/images/sool9/sool9.qcow2' image = '/nsm/libvirt/images/sool9/sool9.qcow2'
interface = 'enp1s0' interface = 'enp1s0'

View File

@@ -70,13 +70,13 @@
{% set vm_name = tag.split('/')[2] %} {% set vm_name = tag.split('/')[2] %}
{% do salt.log.debug('dyanno_hypervisor_orch: Got vm_name from tag: ' ~ vm_name) %} {% do salt.log.debug('dyanno_hypervisor_orch: Got vm_name from tag: ' ~ vm_name) %}
{% if tag.endswith('/deploying') %} {% if tag.endswith('/deploying') %}
{% set hypervisor = data.get('kwargs').get('cloud_grains').get('profile').split('-')[1] %} {% set hypervisor = data.get('kwargs').get('cloud_grains').get('profile').split('_')[1] %}
{% endif %} {% endif %}
{# Set the hypervisor #} {# Set the hypervisor #}
{# First try to get it from the event #} {# First try to get it from the event #}
{% if data.get('profile', False) %} {% if data.get('profile', False) %}
{% do salt.log.debug('dyanno_hypervisor_orch: Did not get cache.grains.') %} {% do salt.log.debug('dyanno_hypervisor_orch: Did not get cache.grains.') %}
{% set hypervisor = data.profile.split('-')[1] %} {% set hypervisor = data.profile.split('_')[1] %}
{% do salt.log.debug('dyanno_hypervisor_orch: Got hypervisor from data: ' ~ hypervisor) %} {% do salt.log.debug('dyanno_hypervisor_orch: Got hypervisor from data: ' ~ hypervisor) %}
{% else %} {% else %}
{% set hypervisor = find_hypervisor_from_status(vm_name) %} {% set hypervisor = find_hypervisor_from_status(vm_name) %}

View File

@@ -6,7 +6,7 @@
{%- for role, hosts in HYPERVISORS.items() %} {%- for role, hosts in HYPERVISORS.items() %}
{%- for host in hosts.keys() %} {%- for host in hosts.keys() %}
sool9-{{host}}: sool9_{{host}}:
provider: kvm-ssh-{{host}} provider: kvm-ssh-{{host}}
base_domain: sool9 base_domain: sool9
ip_source: qemu-agent ip_source: qemu-agent

View File

@@ -650,7 +650,7 @@ def process_vm_creation(hypervisor_path: str, vm_config: dict) -> None:
create_vm_tracking_file(hypervisor_path, vm_name, vm_config) create_vm_tracking_file(hypervisor_path, vm_name, vm_config)
# Build and execute so-salt-cloud command # Build and execute so-salt-cloud command
cmd = ['so-salt-cloud', '-p', f'sool9-{hypervisor}', vm_name] cmd = ['so-salt-cloud', '-p', f'sool9_{hypervisor}', vm_name]
# Add network configuration # Add network configuration
if vm_config['network_mode'] == 'static4': if vm_config['network_mode'] == 'static4':
@@ -788,7 +788,7 @@ def process_vm_deletion(hypervisor_path: str, vm_name: str) -> None:
log.warning("Failed to read VM config from tracking file %s: %s", vm_file, str(e)) log.warning("Failed to read VM config from tracking file %s: %s", vm_file, str(e))
# Attempt VM deletion with so-salt-cloud # Attempt VM deletion with so-salt-cloud
cmd = ['so-salt-cloud', '-p', f'sool9-{hypervisor}', vm_name, '-yd'] cmd = ['so-salt-cloud', '-p', f'sool9_{hypervisor}', vm_name, '-yd']
log.info("Executing: %s", ' '.join(cmd)) log.info("Executing: %s", ' '.join(cmd))
result = subprocess.run(cmd, capture_output=True, text=True, check=True) result = subprocess.run(cmd, capture_output=True, text=True, check=True)

View File

@@ -4,7 +4,7 @@
Elastic License 2.0. #} Elastic License 2.0. #}
{% set nodetype = grains.id.split("_") | last %} {% set nodetype = grains.id.split("_") | last %}
{% set hypervisor = salt['grains.get']('salt-cloud:profile').split('-')[1] %} {% set hypervisor = salt['grains.get']('salt-cloud:profile').split('_')[1] %}
{# Import hardware details from VM hardware tracking file #} {# Import hardware details from VM hardware tracking file #}
{% import_json 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ grains.id as vm_hardware %} {% import_json 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ grains.id as vm_hardware %}

View File

@@ -19,7 +19,7 @@ vm_highstate_trigger:
- data: - data:
status: Highstate Initiated status: Highstate Initiated
vm_name: {{ grains.id }} vm_name: {{ grains.id }}
hypervisor: {{ salt['grains.get']('salt-cloud:profile', '').split('-')[1] }} hypervisor: {{ salt['grains.get']('salt-cloud:profile', '').split('_')[1] }}
- unless: test -f /opt/so/state/highstate_trigger.txt - unless: test -f /opt/so/state/highstate_trigger.txt
- order: 1 # Ensure this runs early in the highstate process - order: 1 # Ensure this runs early in the highstate process