Merge remote-tracking branch 'origin/2.4/dev' into idstools-refactor

This commit is contained in:
DefensiveDepth
2025-11-06 10:38:37 -05:00
84 changed files with 3913 additions and 312 deletions

View File

@@ -5,10 +5,12 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
default_salt_dir=/opt/so/saltstack/default
VERBOSE=0
VERY_VERBOSE=0
TEST_MODE=0
clone_to_tmp() {
# TODO: Need to add an air gap option
# Make a temp location for the files
mkdir /tmp/sogh
@@ -16,19 +18,110 @@ clone_to_tmp() {
#git clone -b dev https://github.com/Security-Onion-Solutions/securityonion.git
git clone https://github.com/Security-Onion-Solutions/securityonion.git
cd /tmp
}
show_file_changes() {
local source_dir="$1"
local dest_dir="$2"
local dir_type="$3" # "salt" or "pillar"
if [ $VERBOSE -eq 0 ]; then
return
fi
echo "=== Changes for $dir_type directory ==="
# Find all files in source directory
if [ -d "$source_dir" ]; then
find "$source_dir" -type f | while read -r source_file; do
# Get relative path
rel_path="${source_file#$source_dir/}"
dest_file="$dest_dir/$rel_path"
if [ ! -f "$dest_file" ]; then
echo "ADDED: $dest_file"
if [ $VERY_VERBOSE -eq 1 ]; then
echo " (New file - showing first 20 lines)"
head -n 20 "$source_file" | sed 's/^/ + /'
echo ""
fi
elif ! cmp -s "$source_file" "$dest_file"; then
echo "MODIFIED: $dest_file"
if [ $VERY_VERBOSE -eq 1 ]; then
echo " (Changes:)"
diff -u "$dest_file" "$source_file" | sed 's/^/ /'
echo ""
fi
fi
done
fi
# Find deleted files (exist in dest but not in source)
if [ -d "$dest_dir" ]; then
find "$dest_dir" -type f | while read -r dest_file; do
# Get relative path
rel_path="${dest_file#$dest_dir/}"
source_file="$source_dir/$rel_path"
if [ ! -f "$source_file" ]; then
echo "DELETED: $dest_file"
if [ $VERY_VERBOSE -eq 1 ]; then
echo " (File was deleted)"
echo ""
fi
fi
done
fi
echo ""
}
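# A minimal standalone sketch (paths illustrative) of the comparison technique above:
# parameter expansion strips the source prefix to build a relative path, and `cmp -s`
# gives a silent byte-level equality test:
#   src=/tmp/sogh/securityonion/salt; dst=/opt/so/saltstack/default/salt
#   find "$src" -type f | while read -r f; do
#       rel="${f#$src/}"                                # strip the leading "$src/" prefix
#       if [ ! -f "$dst/$rel" ]; then echo "ADDED: $rel"
#       elif ! cmp -s "$f" "$dst/$rel"; then echo "MODIFIED: $rel"
#       fi
#   done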
copy_new_files() {
# Copy new files over to the salt dir
cd /tmp/sogh/securityonion
git checkout $BRANCH
VERSION=$(cat VERSION)
if [ $TEST_MODE -eq 1 ]; then
echo "=== TEST MODE: Showing what would change without making changes ==="
echo "Branch: $BRANCH"
echo "Version: $VERSION"
echo ""
fi
# Show changes before copying if verbose mode is enabled OR if in test mode
if [ $VERBOSE -eq 1 ] || [ $TEST_MODE -eq 1 ]; then
if [ $TEST_MODE -eq 1 ]; then
# In test mode, force at least basic verbose output
local old_verbose=$VERBOSE
if [ $VERBOSE -eq 0 ]; then
VERBOSE=1
fi
fi
echo "Analyzing file changes..."
show_file_changes "$(pwd)/salt" "$default_salt_dir/salt" "salt"
show_file_changes "$(pwd)/pillar" "$default_salt_dir/pillar" "pillar"
if [ $TEST_MODE -eq 1 ] && [ $old_verbose -eq 0 ]; then
# Restore original verbose setting
VERBOSE=$old_verbose
fi
fi
# If in test mode, don't copy files
if [ $TEST_MODE -eq 1 ]; then
echo "=== TEST MODE: No files were modified ==="
echo "To apply these changes, run without --test option"
rm -rf /tmp/sogh
return
fi
# We need to overwrite if there is a repo directory
if [ -d /opt/so/repo ]; then
tar -czf /opt/so/repo/"$VERSION".tar.gz -C "$(pwd)/.." .
fi
rsync -a salt $default_salt_dir/
rsync -a pillar $default_salt_dir/
chown -R socore:socore $default_salt_dir/salt
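Note that the rsync invocations above rely on trailing-slash semantics: without a trailing slash on the source, the directory itself is created inside the destination. A quick illustration of the difference:

rsync -a salt /opt/so/saltstack/default/    # creates/updates /opt/so/saltstack/default/salt
rsync -a salt/ /opt/so/saltstack/default/   # would instead merge salt's contents into default/ itself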
@@ -45,11 +138,64 @@ got_root(){
fi
}
show_usage() {
echo "Usage: $0 [-v] [-vv] [--test] [branch]"
echo " -v Show verbose output (files changed/added/deleted)"
echo " -vv Show very verbose output (includes file diffs)"
echo " --test Test mode - show what would change without making changes"
echo " branch Git branch to checkout (default: 2.4/main)"
echo ""
echo "Examples:"
echo " $0 # Normal operation"
echo " $0 -v # Show which files change"
echo " $0 -vv # Show files and their diffs"
echo " $0 --test # See what would change (dry run)"
echo " $0 --test -vv # Test mode with detailed diffs"
echo " $0 -v dev-branch # Use specific branch with verbose output"
exit 1
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
-v)
VERBOSE=1
shift
;;
-vv)
VERBOSE=1
VERY_VERBOSE=1
shift
;;
--test)
TEST_MODE=1
shift
;;
-h|--help)
show_usage
;;
-*)
echo "Unknown option $1"
show_usage
;;
*)
# This should be the branch name
if [ -z "$BRANCH" ]; then
BRANCH="$1"
else
echo "Too many arguments"
show_usage
fi
shift
;;
esac
done
# Set default branch if not provided
if [ -z "$BRANCH" ]; then
BRANCH=2.4/main
fi
got_root
clone_to_tmp
copy_new_files

View File

@@ -387,7 +387,7 @@ function syncElastic() {
if [[ -z "$SKIP_STATE_APPLY" ]]; then
echo "Elastic state will be re-applied to affected minions. This will run in the background and may take several minutes to complete."
echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1
salt --async -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
salt --async -C 'I@elasticsearch:enabled:true' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
fi
else
echo "Newly generated users/roles files are incomplete; aborting."

View File

@@ -21,6 +21,9 @@ whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false
SALT_CLOUD_CONFIGURED=false
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
@@ -169,6 +172,8 @@ airgap_update_dockers() {
tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
echo "Add Registry back"
docker load -i "$AGDOCKER/registry_image.tar"
echo "Restart registry container"
salt-call state.apply registry queue=True
fi
fi
}
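The added state.apply recreates the registry container once its image has been reloaded; a quick way to confirm the local registry answers again (the port is an assumption, not stated in this diff):

curl -sk https://localhost:5000/v2/_catalog   # registry port assumed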
@@ -420,6 +425,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
[[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
true
}
@@ -450,6 +456,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
true
}
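Each post_to_X function ends by setting POSTVERSION to its own release, so a grid several versions behind walks this chain one hop at a time within a single soup run. A minimal sketch of the pattern:

POSTVERSION=2.4.170                                   # illustrative starting point
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180    # runs and sets POSTVERSION=2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190    # now matches, so the next hop runs too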
@@ -599,15 +606,36 @@ post_to_2.4.170() {
}
post_to_2.4.180() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Force update to Kafka output policy
/usr/sbin/so-kafka-fleet-output-policy --force
POSTVERSION=2.4.180
}
post_to_2.4.190() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Only need to update import / eval nodes
if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
update_import_fleet_output
fi
# Check if expected default policy is logstash (global.pipeline is REDIS or "")
pipeline=$(lookup_pillar "pipeline" "global")
if [[ -z "$pipeline" ]] || [[ "$pipeline" == "REDIS" ]]; then
# Check if this grid is currently affected by corrupt fleet output policy
if elastic-agent status | grep "config: key file not configured" > /dev/null 2>&1; then
echo "Elastic Agent shows an ssl error connecting to logstash output. Updating output policy..."
update_default_logstash_output
fi
fi
# Apply new elasticsearch.server index template
rollover_index "logs-elasticsearch.server-default"
POSTVERSION=2.4.190
}
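# Sketch: rollover_index above forces the data stream onto a fresh backing index so the
# new elasticsearch.server template applies immediately; the underlying call is roughly
# the Elasticsearch rollover API (port and auth assumed from curl.config usage in this file):
#   curl -sK /opt/so/conf/elasticsearch/curl.config -XPOST \
#       "localhost:9200/logs-elasticsearch.server-default/_rollover"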
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -864,10 +892,15 @@ up_to_2.4.170() {
}
up_to_2.4.180() {
echo "Nothing to do for 2.4.180"
INSTALLEDVERSION=2.4.180
}
up_to_2.4.190() {
# Elastic was updated in this release, so download the Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.180
INSTALLEDVERSION=2.4.190
}
add_hydra_pillars() {
@@ -1143,6 +1176,44 @@ update_elasticsearch_index_settings() {
done
}
update_import_fleet_output() {
if output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" --retry 3 --fail 2>/dev/null); then
# Update the current config of the so-manager_elasticsearch output policy in place, preserving any customizations (e.g., a preset changed from 'balanced' to 'performance')
CAFINGERPRINT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
updated_policy=$(jq --arg CAFINGERPRINT "$CAFINGERPRINT" '.item | (del(.id) | .ca_trusted_fingerprint = $CAFINGERPRINT)' <<< "$output")
if curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$updated_policy" --retry 3 --fail 2>/dev/null; then
echo "Successfully updated so-manager_elasticsearch fleet output policy"
else
fail "Failed to update so-manager_elasticsearch fleet output policy"
fi
fi
}
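# Sketch: Fleet's ca_trusted_fingerprint is the SHA-256 digest of the DER-encoded CA
# certificate, upper-cased with no colons; the pipeline above can be re-run by hand to
# verify what was pushed:
#   openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER \
#       | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]'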
update_default_logstash_output() {
echo "Updating fleet logstash output policy grid-logstash"
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
# Keep the already-configured hosts for this update; subsequent host updates come from so-elastic-fleet-outputs-update
HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$(jq -n \
--argjson HOSTS "$HOSTS" \
--arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
--arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
fi
if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
echo "Successfully updated grid-logstash fleet output policy"
fi
}
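# Sketch: building the payload with `jq -n` keeps multi-line PEM blocks and
# pre-serialized arrays safely escaped instead of interpolating shell variables into a
# handwritten JSON string; --arg passes a JSON string, --argjson splices in parsed JSON
# (values illustrative):
#   HOSTS='["logstash1:5055","logstash2:5055"]'
#   jq -n --argjson HOSTS "$HOSTS" --arg KEY "$(cat key.pem)" \
#       '{"hosts": $HOSTS, "secrets": {"ssl": {"key": $KEY}}}'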
update_salt_mine() {
echo "Populating the mine with mine_functions for each host."
set +e
@@ -1192,24 +1263,43 @@ upgrade_check_salt() {
}
upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If rhel family
if [[ $is_rpm ]]; then
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt"
yum versionlock delete "salt-minion"
yum versionlock delete "salt-master"
# Remove salt-cloud versionlock if installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
yum versionlock delete "salt-cloud"
fi
echo "Updating Salt packages."
echo ""
set +e
# If Oracle, run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# Add -L flag only if salt-cloud is already installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
# If another RHEL-family variant, run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
@@ -1222,6 +1312,10 @@ upgrade_salt() {
yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
# Add salt-cloud versionlock if installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
fi
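# A sketch of the versionlock lifecycle managed here: pins are dropped before
# bootstrap-salt.sh runs, then re-added against $NEWSALTVERSION so routine yum updates
# can never move Salt between soups (version below is illustrative):
#   yum versionlock list                              # show current pins
#   yum versionlock delete "salt-minion"
#   yum versionlock add "salt-minion-0:3006.9-0.*"    # 0: epoch + wildcard release, as above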
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
@@ -1254,6 +1348,7 @@ upgrade_salt() {
echo ""
exit 1
else
SALTUPGRADED=true
echo "Salt upgrade success."
echo ""
fi
@@ -1359,6 +1454,7 @@ main() {
fi
set_minionid
MINION_ROLE=$(lookup_role)
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
if [[ $is_airgap -eq 0 ]]; then
@@ -1401,7 +1497,7 @@ main() {
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION hotfix"
# since we don't run the backup.config_backup state on import, we won't snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
if [[ ! "$MINION_ROLE" == "import" ]]; then
backup_old_states_pillars
fi
copy_new_files
@@ -1464,7 +1560,7 @@ main() {
fi
# since we don't run the backup.config_backup state on import, we won't snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
if [[ ! "$MINION_ROLE" == "import" ]]; then
echo ""
echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
backup_old_states_pillars
@@ -1496,6 +1592,11 @@ main() {
# ensure the mine is updated and populated before highstates run, following the salt-master restart
update_salt_mine
if [[ $SALT_CLOUD_CONFIGURED == true && $SALTUPGRADED == true ]]; then
echo "Updating salt-cloud config to use the new Salt version"
salt-call state.apply salt.cloud.config concurrent=True
fi
enable_highstate
echo ""

View File

@@ -211,7 +211,7 @@ Exit Codes:
Logging:
- Logs are written to /opt/so/log/salt/so-salt-cloud.log.
- Logs are written to /opt/so/log/salt/so-salt-cloud.
- Both file and console logging are enabled for real-time monitoring.
"""
@@ -233,7 +233,7 @@ local = salt.client.LocalClient()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler('/opt/so/log/salt/so-salt-cloud.log')
file_handler = logging.FileHandler('/opt/so/log/salt/so-salt-cloud')
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(message)s')
@@ -516,23 +516,85 @@ def run_qcow2_modify_hardware_config(profile, vm_name, cpu=None, memory=None, pc
target = hv_name + "_*"
try:
args_list = [
'vm_name=' + vm_name,
'cpu=' + str(cpu) if cpu else '',
'memory=' + str(memory) if memory else '',
'start=' + str(start)
]
args_list = ['vm_name=' + vm_name]
# Only add parameters that are actually specified
if cpu is not None:
args_list.append('cpu=' + str(cpu))
if memory is not None:
args_list.append('memory=' + str(memory))
# Add PCI devices if provided
if pci_list:
# Pass all PCI devices as a comma-separated list
args_list.append('pci=' + ','.join(pci_list))
# Always add start parameter
args_list.append('start=' + str(start))
result = local.cmd(target, 'qcow2.modify_hardware_config', args_list)
format_qcow2_output('Hardware configuration', result)
except Exception as e:
logger.error(f"An error occurred while running qcow2.modify_hardware_config: {e}")
def run_qcow2_create_volume_config(profile, vm_name, size_gb, cpu=None, memory=None, start=False):
"""Create a volume for the VM and optionally configure CPU/memory.
Args:
profile (str): The cloud profile name
vm_name (str): The name of the VM
size_gb (int): Size of the volume in GB
cpu (int, optional): Number of CPUs to assign
memory (int, optional): Amount of memory in MiB
start (bool): Whether to start the VM after configuration
"""
hv_name = profile.split('_')[1]
target = hv_name + "_*"
try:
# Step 1: Create the volume
logger.info(f"Creating {size_gb}GB volume for VM {vm_name}")
volume_result = local.cmd(
target,
'qcow2.create_volume_config',
kwarg={
'vm_name': vm_name,
'size_gb': size_gb,
'start': False # Don't start yet if we need to configure CPU/memory
}
)
format_qcow2_output('Volume creation', volume_result)
# Step 2: Configure CPU and memory if specified
if cpu or memory:
logger.info(f"Configuring hardware for VM {vm_name}: CPU={cpu}, Memory={memory}MiB")
hw_result = local.cmd(
target,
'qcow2.modify_hardware_config',
kwarg={
'vm_name': vm_name,
'cpu': cpu,
'memory': memory,
'start': start
}
)
format_qcow2_output('Hardware configuration', hw_result)
elif start:
# If no CPU/memory config needed but we need to start the VM
logger.info(f"Starting VM {vm_name}")
start_result = local.cmd(
target,
'qcow2.modify_hardware_config',
kwarg={
'vm_name': vm_name,
'start': True
}
)
format_qcow2_output('VM startup', start_result)
except Exception as e:
logger.error(f"An error occurred while creating volume and configuring hardware: {e}")
def run_qcow2_modify_network_config(profile, vm_name, mode, ip=None, gateway=None, dns=None, search_domain=None):
hv_name = profile.split('_')[1]
target = hv_name + "_*"
@@ -586,6 +648,7 @@ def parse_arguments():
network_group.add_argument('-c', '--cpu', type=int, help='Number of virtual CPUs to assign.')
network_group.add_argument('-m', '--memory', type=int, help='Amount of memory to assign in MiB.')
network_group.add_argument('-P', '--pci', action='append', help='PCI hardware ID(s) to passthrough to the VM (e.g., 0000:c7:00.0). Can be specified multiple times.')
network_group.add_argument('--nsm-size', type=int, help='Size in GB for NSM volume creation. Can be combined with copper/SFP NIC passthrough (--pci); only disk passthrough without --nsm-size prevents volume creation.')
args = parser.parse_args()
@@ -621,6 +684,8 @@ def main():
hw_config.append(f"{args.memory}MB RAM")
if args.pci:
hw_config.append(f"PCI devices: {', '.join(args.pci)}")
if args.nsm_size:
hw_config.append(f"NSM volume: {args.nsm_size}GB")
hw_string = f" and hardware config: {', '.join(hw_config)}" if hw_config else ""
logger.info(f"Received request to create VM '{args.vm_name}' using profile '{args.profile}' {network_config}{hw_string}")
@@ -643,8 +708,58 @@ def main():
# Step 2: Provision the VM (without starting it)
call_salt_cloud(args.profile, args.vm_name)
# Step 3: Modify hardware configuration
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=args.pci, start=True)
# Step 3: Determine storage configuration approach
# Priority: disk passthrough > volume creation (but volume can coexist with copper/sfp NICs)
# Note: virtual_node_manager.py already filters out --nsm-size when disk is present,
# so if both --pci and --nsm-size are present here, the PCI devices are copper/sfp NICs
use_passthrough = False
use_volume_creation = False
has_nic_passthrough = False
if args.nsm_size:
# Validate nsm_size
if args.nsm_size <= 0:
logger.error(f"Invalid nsm_size value: {args.nsm_size}. Must be a positive integer.")
sys.exit(1)
use_volume_creation = True
logger.info(f"Using volume creation with size {args.nsm_size}GB (--nsm-size parameter specified)")
if args.pci:
# If both nsm_size and PCI are present, PCI devices are copper/sfp NICs
# (virtual_node_manager.py filters out nsm_size when disk is present)
has_nic_passthrough = True
logger.info(f"PCI devices (copper/sfp NICs) will be passed through along with volume: {', '.join(args.pci)}")
elif args.pci:
# Only PCI devices and no nsm_size - could be a disk or NICs
# This script is called by virtual_node_manager, which ensures nsm_size and the disk PCI slot are never both passed here
# We can also land here when no disk passthrough or nsm_size was specified but some other PCI slot was passed
use_passthrough = True
logger.info(f"Configuring PCI device passthrough.(--pci parameter specified without --nsm-size)")
# Step 4: Configure hardware based on storage approach
if use_volume_creation:
# Create volume first
run_qcow2_create_volume_config(args.profile, args.vm_name, size_gb=args.nsm_size, cpu=args.cpu, memory=args.memory, start=False)
# Then configure NICs if present
if has_nic_passthrough:
logger.info(f"Configuring NIC passthrough for VM {args.vm_name}")
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=args.pci, start=True)
else:
# No NICs, just start the VM
logger.info(f"Starting VM {args.vm_name}")
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=None, start=True)
elif use_passthrough:
# Use existing passthrough logic via modify_hardware_config
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=args.pci, start=True)
else:
# No storage configuration, just configure CPU/memory if specified
if args.cpu or args.memory:
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=args.cpu, memory=args.memory, pci_list=None, start=True)
else:
# No hardware configuration needed, just start the VM
logger.info(f"No hardware configuration specified, starting VM {args.vm_name}")
run_qcow2_modify_hardware_config(args.profile, args.vm_name, cpu=None, memory=None, pci_list=None, start=True)
except KeyboardInterrupt:
logger.error("so-salt-cloud: Operation cancelled by user.")