Handle refreshing the base image and reinstalling the VM if the source qcow2 image changes

This commit is contained in:
m0duspwnens
2025-01-17 19:27:04 -05:00
parent 1f13554bd9
commit 54eeb0e327
2 changed files with 113 additions and 25 deletions

View File

@@ -10,6 +10,9 @@
# software that is protected by the license key."
"""
TODO: Change default disk_size from 60G back to 220G; the smaller size was set to speed up VM start during development.
TODO: Remove the passwd hash prior to release; it is only used for development.
This runner performs the initial setup required for hypervisor hosts in the environment.
It handles downloading the Oracle Linux KVM image, setting up SSH keys for secure
communication, and creating the initial VM.
@@ -24,10 +27,10 @@ but can also be run manually if needed.
CLI Examples:
# Perform complete environment setup (creates VM named 'sool9' with 220G disk by default)
# Perform complete environment setup (creates VM named 'sool9' with 60G disk by default)
salt-run setup_hypervisor.setup_environment
# Setup with custom VM name (uses default 220G disk)
# Setup with custom VM name (uses default 60G disk)
salt-run setup_hypervisor.setup_environment myvm
# Setup with custom VM name and disk size
@@ -36,7 +39,7 @@ CLI Examples:
# Regenerate SSH keys only
salt-run setup_hypervisor.regenerate_ssh_keys
# Create additional VM with default disk size (220G)
# Create additional VM with default disk size (60G)
salt-run setup_hypervisor.create_vm myvm2
# Create additional VM with custom disk size
@@ -301,7 +304,7 @@ def _check_vm_exists(vm_name: str) -> bool:
log.info("MAIN: VM %s already exists", vm_name)
return exists
def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id: str = None):
def setup_environment(vm_name: str = 'sool9', disk_size: str = '60G', minion_id: str = None):
"""
Main entry point to set up the hypervisor environment.
This includes downloading the base image, generating SSH keys for remote access,
@@ -311,7 +314,7 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
vm_name (str, optional): Name of the VM to create as part of environment setup.
Defaults to 'sool9'.
disk_size (str, optional): Size of the VM disk with unit.
Defaults to '220G'.
Defaults to '60G'.
Returns:
dict: Dictionary containing setup status and VM creation results
@@ -397,13 +400,13 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
'vm_result': vm_result
}
def create_vm(vm_name: str, disk_size: str = '220G'):
def create_vm(vm_name: str, disk_size: str = '60G'):
"""
Create a new VM with cloud-init configuration.
Args:
vm_name (str): Name of the VM
disk_size (str): Size of the disk with unit (default: '220G')
disk_size (str): Size of the disk with unit (default: '60G')
Returns:
dict: Dictionary containing success status and commands to run on hypervisor
@@ -496,10 +499,6 @@ ssh_genkeytypes: ['ed25519', 'rsa']
# set timezone for VM
timezone: UTC
# Install QEMU guest agent. Enable and start the service
packages:
- qemu-guest-agent
write_files:
- path: /etc/yum.repos.d/securityonion.repo
content: |
@@ -510,15 +509,22 @@ write_files:
gpgcheck=1
sslverify=0
packages:
- qemu-guest-agent
runcmd:
- systemctl enable --now qemu-guest-agent
# Remove all repo files except securityonion.repo
- for f in /etc/yum.repos.d/*.repo; do if [ "$(basename $f)" != "securityonion.repo" ]; then rm -f "$f"; fi; done
- systemctl enable --now serial-getty@ttyS0.service
- systemctl enable --now NetworkManager
- systemctl enable --now qemu-guest-agent
- growpart /dev/vda 2
- pvresize /dev/vda2
- lvextend -l +100%FREE /dev/vg_main/lv_root
- xfs_growfs /dev/vg_main/lv_root
- touch /etc/cloud/cloud-init.disabled
- systemctl stop cloud-init
- systemctl disable cloud-init
- dnf remove cloud-init
- shutdown -P now
"""
user_data_path = os.path.join(vm_dir, 'user-data')
@@ -603,6 +609,19 @@ runcmd:
user_data_path, meta_data_path],
check=True, capture_output=True)
# Generate SHA256 hash of the qcow2 image
sha256_hash = hashlib.sha256()
with salt.utils.files.fopen(vm_image, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
sha256_hash.update(chunk)
# Write hash to file
hash_file = os.path.join(vm_dir, f'{vm_name}.sha256')
with salt.utils.files.fopen(hash_file, 'w') as f:
f.write(sha256_hash.hexdigest())
log.info("CREATEVM: Generated SHA256 hash for %s", vm_image)
return {
'success': True,
'vm_dir': vm_dir

View File

@@ -16,13 +16,43 @@
include:
- libvirt.packages
# Copy base image files
baseimagefiles_sool9:
file.recurse:
- name: /nsm/libvirt/images/sool9/
- source: salt://libvirt/images/sool9/
# Manage SHA256 hash file
manage_sha256_sool9:
file.managed:
- name: /nsm/libvirt/images/sool9/sool9.sha256
- source: salt://libvirt/images/sool9/sool9.sha256
- makedirs: True
# Manage qcow2 image
manage_qcow2_sool9:
file.managed:
- name: /nsm/libvirt/images/sool9/sool9.qcow2
- source: salt://libvirt/images/sool9/sool9.qcow2
- onchanges:
- file: manage_sha256_sool9
# Manage cloud-init files
manage_metadata_sool9:
file.managed:
- name: /nsm/libvirt/images/sool9/meta-data
- source: salt://libvirt/images/sool9/meta-data
- require:
- file: manage_qcow2_sool9
manage_userdata_sool9:
file.managed:
- name: /nsm/libvirt/images/sool9/user-data
- source: salt://libvirt/images/sool9/user-data
- require:
- file: manage_qcow2_sool9
manage_cidata_sool9:
file.managed:
- name: /nsm/libvirt/images/sool9/sool9-cidata.iso
- source: salt://libvirt/images/sool9/sool9-cidata.iso
- require:
- file: manage_qcow2_sool9
# Define the storage pool
define_storage_pool_sool9:
virt.pool_defined:
@@ -30,18 +60,56 @@ define_storage_pool_sool9:
- ptype: dir
- target: /nsm/libvirt/images/sool9
- require:
- file: baseimagefiles_sool9
- file: manage_metadata_sool9
- file: manage_userdata_sool9
- file: manage_cidata_sool9
- cmd: libvirt_python_module
- unless:
- virsh pool-list --all | grep -q sool9
# Set pool autostart
set_pool_autostart_sool9:
cmd.run:
- name: virsh pool-autostart sool9
- require:
- virt: define_storage_pool_sool9
- unless:
- virsh pool-info sool9 | grep -q "Autostart.*yes"
# Start the storage pool
start_storage_pool_sool9:
virt.pool_running:
- name: sool9
- ptype: dir
- target: /nsm/libvirt/images/sool9
cmd.run:
- name: virsh pool-start sool9
- require:
- virt: define_storage_pool_sool9
- cmd: libvirt_python_module
- unless:
- virsh pool-info sool9 | grep -q "State.*running"
# Stop the VM if running and base image files change
stop_vm_sool9:
module.run:
- virt.stop:
- name: sool9
- onchanges:
- file: manage_qcow2_sool9
- require_in:
- module: undefine_vm_sool9
- onlyif:
# Only try to stop if VM is actually running
- virsh list --state-running --name | grep -q sool9
undefine_vm_sool9:
module.run:
- virt.undefine:
- vm_: sool9
- onchanges:
- file: manage_qcow2_sool9
# Note: When VM doesn't exist, you'll see "error: failed to get domain 'sool9'" - this is expected
# [ERROR ] Command 'virsh' failed with return code: 1
# [ERROR ] stdout: error: failed to get domain 'sool9'
- onlyif:
- virsh dominfo sool9
# Create and start the VM using virt-install
create_vm_sool9:
@@ -55,10 +123,11 @@ create_vm_sool9:
--os-variant=ol9.5 \
--import \
--noautoconsole
- unless: virsh list --all | grep -q sool9
- require:
- virt: start_storage_pool_sool9
- cmd: start_storage_pool_sool9
- pkg: install_virt-install
- onchanges:
- file: manage_qcow2_sool9
{% else %}
{{sls}}_no_license_detected: