diff --git a/salt/hypervisor/map.jinja b/salt/hypervisor/map.jinja
new file mode 100644
index 000000000..b187bdfb1
--- /dev/null
+++ b/salt/hypervisor/map.jinja
@@ -0,0 +1,45 @@
+{# Import defaults.yaml for model hardware capabilities #}
+{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
+
+{# Get hypervisor nodes from pillar #}
+{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
+
+{# Build enhanced HYPERVISORS structure #}
+{% set HYPERVISORS = {} %}
+{% for role, hypervisors in NODES.items() %}
+  {% do HYPERVISORS.update({role: {}}) %}
+  {% for hypervisor, config in hypervisors.items() %}
+    {# Get model from cached grains using Salt runner #}
+    {% set grains = salt.saltutil.runner('cache.grains', tgt=hypervisor ~ '_*', tgt_type='glob') %}
+    {% set model = '' %}
+    {% if grains %}
+      {% set minion_id = grains.keys() | first %}
+      {% set model = grains[minion_id].get('sosmodel', '') %}
+    {% endif %}
+    {% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
+
+    {# Get VM list and states #}
+    {% set vms = {} %}
+    {% import_json 'hypervisor/hosts/' ~ hypervisor ~ 'VMs' as vm_list %}
+
+    {# Load state for each VM #}
+    {% for vm in vm_list %}
+      {% set hostname = vm.get('hostname', '') %}
+      {% set vm_role = vm.get('role', '') %}
+      {% if hostname and vm_role %}
+        {% import_json 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ vm_role as vm_state %}
+        {% do vms.update({hostname: vm_state}) %}
+      {% endif %}
+    {% endfor %}
+
+    {# Merge node config with model capabilities and VM states #}
+    {% do HYPERVISORS[role].update({
+      hypervisor: {
+        'config': config,
+        'model': model,
+        'hardware': model_config.get('hardware', {}),
+        'vms': vms
+      }
+    }) %}
+  {% endfor %}
+{% endfor %}
\ No newline at end of file
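For reference, the structure that map.jinja exports looks roughly like the Python literal below. The keys ('config', 'model', 'hardware', 'vms') come from the template above; the role key, hostnames, model string, and hardware numbers are invented placeholders rather than values from this PR.

```python
# Hypothetical shape of HYPERVISORS as built by map.jinja; all concrete values are examples only.
HYPERVISORS = {
    "hypervisor": {                        # role key from pillar hypervisor:nodes
        "hv01": {
            "config": {},                  # per-node config from pillar (contents not shown here)
            "model": "example-model",      # sosmodel grain of the matching minion
            "hardware": {                  # defaults.yaml entry for that model
                "cpu": 32,
                "memory": 256,
                "disk": {},
                "copper": {},
                "sfp": {},
            },
            "vms": {                       # one entry per VM listed in the <hypervisor>VMs file
                "sensor01": {"config": {"cpu": 8, "memory": 16}},
            },
        },
    },
}

print(sorted(HYPERVISORS["hypervisor"]["hv01"].keys()))
```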
diff --git a/salt/salt/engines/master/virtual_node_manager.py b/salt/salt/engines/master/virtual_node_manager.py
index da8c8dab4..ef04032ce 100644
--- a/salt/salt/engines/master/virtual_node_manager.py
+++ b/salt/salt/engines/master/virtual_node_manager.py
@@ -9,7 +9,7 @@ Salt Engine for Virtual Node Management
 
 This engine manages the automated provisioning of virtual machines in Security Onion's
-virtualization infrastructure. It processes VM configurations from a nodes file and handles
+virtualization infrastructure. It processes VM configurations from a VMs file and handles
 the entire provisioning process including hardware allocation, state tracking, and file
 ownership.
 
 Usage:
@@ -26,8 +26,8 @@ Options:
        will automatically be converted to MiB when passed to so-salt-cloud.
 
 Configuration Files:
-    nodes: JSON file containing VM configurations
-        - Located at <hosts_path>/<hypervisor>/nodes
+    VMs: JSON file containing VM configurations
+        - Located at <hosts_path>/<hypervisor>VMs
         - Contains array of VM configurations
        - Each VM config specifies hardware and network settings
 
@@ -54,6 +54,7 @@ State Files:
 VM Tracking Files:
     - <hostname>_<role>: Active VM with status 'creating' or 'running'
     - <hostname>_<role>.error: Error state with detailed message
+
 Notes:
     - Requires 'hvn' feature license
     - Uses hypervisor's sosmodel grain for hardware capabilities
@@ -76,7 +77,7 @@ Description:
        - Prevents operation if license is invalid
 
     3. Configuration Processing
-       - Reads nodes file from each hypervisor directory
+       - Reads the VMs file for each hypervisor
        - Validates configuration parameters
        - Compares against existing VM tracking files
 
@@ -693,7 +694,7 @@ def process_hypervisor(hypervisor_path: str) -> None:
     are protected by the engine-wide lock that is acquired at engine start.
 
     The function performs the following steps:
-    1. Reads and validates nodes configuration
+    1. Reads and validates the VMs configuration
     2. Identifies existing VMs
     3. Processes new VM creation requests
     4. Handles VM deletions for removed configurations
@@ -702,13 +703,18 @@
         hypervisor_path: Path to the hypervisor directory
     """
     try:
-        # Detection phase - no lock needed
-        nodes_file = os.path.join(hypervisor_path, 'nodes')
-        if not os.path.exists(nodes_file):
+        # Get hypervisor name from path
+        hypervisor = os.path.basename(hypervisor_path)
+
+        # Read the VMs file for this hypervisor, located alongside its directory
+        vms_file = os.path.join(os.path.dirname(hypervisor_path), f"{hypervisor}VMs")
+        if not os.path.exists(vms_file):
+            log.debug("No VMs file found at %s", vms_file)
             return
 
-        nodes_config = read_json_file(nodes_file)
+        nodes_config = read_json_file(vms_file)
         if not nodes_config:
+            log.debug("Empty VMs configuration in %s", vms_file)
             return
 
         # Get existing VMs - no lock needed
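To make the new lookup easier to follow, here is a minimal standalone sketch of the path handling in process_hypervisor. It assumes only the behavior visible in the hunk above; the helper names are hypothetical, read_json_file is replaced with plain json.load, and the example paths are placeholders.

```python
import json
import os

def vms_file_for(hypervisor_path: str) -> str:
    """Return the VMs file path for a hypervisor directory,
    e.g. .../hosts/hv01 -> .../hosts/hv01VMs (mirrors process_hypervisor)."""
    hypervisor = os.path.basename(hypervisor_path)
    return os.path.join(os.path.dirname(hypervisor_path), f"{hypervisor}VMs")

def load_vms_config(hypervisor_path: str):
    """Read the JSON array of VM configurations, or return None if the file is absent or empty."""
    path = vms_file_for(hypervisor_path)
    if not os.path.exists(path):
        return None
    with open(path) as fh:
        data = json.load(fh)
    return data or None
```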
diff --git a/salt/soc/dyanno/hypervisor/hypervisor.yaml b/salt/soc/dyanno/hypervisor/hypervisor.yaml
index 2b9a0d70e..477525259 100644
--- a/salt/soc/dyanno/hypervisor/hypervisor.yaml
+++ b/salt/soc/dyanno/hypervisor/hypervisor.yaml
@@ -1,49 +1,33 @@
 hypervisor:
   hosts:
     defaultHost:
-      hardwareMap:
-        title: 'All Hardware'
-        description: This shows hardware available to the hypervisor and PCIe -> INT mapping.
-        file: true
-        readonly: true
-        global: true # set to true to remove host drop down
-        multiline: true
-      vmMap:
-        title: 'VM Map'
-        description: This shows the VMs and the hardware they have claimed.
-        file: true
-        readonly: true
-        global: true
-        multiline: true
-      nodes:
-        description: 'Available CPU: CPUFREE | Available Memory: MEMFREE | Available Disk: DISKFREE | Available Copper NIC: COPPERFREE | Available SFP NIC: SFPFREE'
-        syntax: json
-        uiElements:
-          - field: hostname
-            label: Enter the hostname
-          - field: role
-            label: sensor or searchnode
-          - field: network_mode
-            label: Choose static4 or dhcp4. If static4, populate IP details below.
-          - field: ip4
-            label: IP Address with netmask. ex. 192.168.1.10/24
-          - field: gw4
-            label: Gateway
-          - field: dns4
-            label: DNS. Comma separated list. ex. 192.168.1.1,8.8.8.8
-          - field: search4
-            label: Search domain
-          - field: cpu
-            label: Number of CPU cores to assign. ex. 8
-          - field: memory
-            label: Memory, in GB to assign. ex. 16
-          - field: disk
-            label: Choose a disk or disks to assign for passthrough. Comma separated list.
-          - field: copper
-            label: Choose a copper port or ports to assign for passthrough. Comma separated list.
-          - field: sfp
-            label: Choose a sfp port or ports to assign for passthrough. Comma separated list.
-        file: true
-        global: true
-
-    vms: {}
+      title: defaultHost
+      description: 'Available CPU: CPUFREE | Available Memory: MEMFREE | Available Disk: DISKFREE | Available Copper NIC: COPPERFREE | Available SFP NIC: SFPFREE'
+      syntax: json
+      uiElements:
+        - field: hostname
+          label: Enter the hostname
+        - field: role
+          label: sensor or searchnode
+        - field: network_mode
+          label: Choose static4 or dhcp4. If static4, populate IP details below.
+        - field: ip4
+          label: IP Address with netmask. ex. 192.168.1.10/24
+        - field: gw4
+          label: Gateway
+        - field: dns4
+          label: DNS. Comma separated list. ex. 192.168.1.1,8.8.8.8
+        - field: search4
+          label: Search domain
+        - field: cpu
+          label: Number of CPU cores to assign. ex. 8
+        - field: memory
+          label: Memory, in GB to assign. ex. 16
+        - field: disk
+          label: Choose a disk or disks to assign for passthrough. Comma separated list.
+        - field: copper
+          label: Choose a copper port or ports to assign for passthrough. Comma separated list.
+        - field: sfp
+          label: Choose an SFP port or ports to assign for passthrough. Comma separated list.
+      file: true
+      global: true
diff --git a/salt/soc/dyanno/hypervisor/init.sls b/salt/soc/dyanno/hypervisor/init.sls
index a0b83fd5d..5645a8f63 100644
--- a/salt/soc/dyanno/hypervisor/init.sls
+++ b/salt/soc/dyanno/hypervisor/init.sls
@@ -1,4 +1,4 @@
-{% from 'soc/dyanno/hypervisor/map.jinja' import HYPERVISORS %}
+{% from 'hypervisor/map.jinja' import HYPERVISORS %}
 
 hypervisor_annotation:
   file.managed:
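Since the defaultHost annotation declares syntax: json and the uiElements above define the editable fields, a single entry in a hypervisor VMs file would look roughly like the sketch below. The field names follow the uiElements list; every value is an illustrative placeholder, not data from this PR.

```python
import json

# Hypothetical VM entry; field names mirror the uiElements above, values are examples only.
vm_entry = {
    "hostname": "sensor01",
    "role": "sensor",                 # sensor or searchnode
    "network_mode": "static4",        # static4 or dhcp4
    "ip4": "192.168.1.10/24",
    "gw4": "192.168.1.1",
    "dns4": "192.168.1.1,8.8.8.8",
    "search4": "example.local",
    "cpu": 8,
    "memory": 16,                     # GB
    "disk": "0",                      # comma separated passthrough selections
    "copper": "1",
    "sfp": "",
}

# A VMs file contains an array of such entries.
print(json.dumps([vm_entry], indent=2))
```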
diff --git a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
index 74996605e..2e274157b 100644
--- a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
+++ b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja
@@ -1,27 +1,64 @@
 {%- import_yaml 'soc/dyanno/hypervisor/hypervisor.yaml' as ANNOTATION -%}
+{%- from 'hypervisor/map.jinja' import HYPERVISORS -%}
 {%- set TEMPLATE = ANNOTATION.hypervisor.hosts.pop('defaultHost') -%}
 
 {%- macro update_description(description, cpu_free, mem_free, disk_free, copper_free, sfp_free) -%}
-{{- description | replace('CPUFREE', cpu_free | string) 
-                | replace('MEMFREE', mem_free | string) 
+{{- description | replace('CPUFREE', cpu_free | string)
+                | replace('MEMFREE', mem_free | string)
                 | replace('DISKFREE', disk_free | string)
                 | replace('COPPERFREE', copper_free | string)
                 | replace('SFPFREE', sfp_free | string) -}}
 {%- endmacro -%}
 
+{%- macro get_available_pci(hw_config, device_type, used_indices) -%}
+{%- set available = [] -%}
+{%- for idx in hw_config.get(device_type, {}).keys() -%}
+  {%- if idx | string not in used_indices -%}
+    {%- do available.append(idx) -%}
+  {%- endif -%}
+{%- endfor -%}
+{{- available | join(',') -}}
+{%- endmacro -%}
+
 {%- for role in HYPERVISORS -%}
 {%- for hypervisor in HYPERVISORS[role].keys() -%}
 
-{%- set cpu_free = HYPERVISORS[role][hypervisor].available_cpu -%}
-{%- set mem_free = HYPERVISORS[role][hypervisor].available_memory -%}
-{%- set disk_free = HYPERVISORS[role][hypervisor].available_disk -%}
-{%- set copper_free = HYPERVISORS[role][hypervisor].available_copper -%}
-{%- set sfp_free = HYPERVISORS[role][hypervisor].available_sfp -%}
+{%- set hw_config = HYPERVISORS[role][hypervisor].hardware -%}
+{%- set vms = HYPERVISORS[role][hypervisor].vms -%}
+
+{# Calculate used CPU and memory #}
+{%- set ns = namespace(used_cpu=0, used_memory=0) -%}
+{%- for hostname, vm_data in vms.items() -%}
+{%- set vm_config = vm_data.config -%}
+{%- set ns.used_cpu = ns.used_cpu + vm_config.cpu | int -%}
+{%- set ns.used_memory = ns.used_memory + vm_config.memory | int -%}
+{%- endfor -%}
+
+{# Calculate available resources #}
+{%- set cpu_free = hw_config.cpu - ns.used_cpu -%}
+{%- set mem_free = hw_config.memory - ns.used_memory -%}
+
+{# Get used PCI indices #}
+{%- set used_disk = [] -%}
+{%- set used_copper = [] -%}
+{%- set used_sfp = [] -%}
+{%- for hostname, vm in vms.items() -%}
+{%- set config = vm.get('config', {}) -%}
+{%- do used_disk.extend((config.get('disk', '') | string).split(',') | map('trim') | list) -%}
+{%- do used_copper.extend((config.get('copper', '') | string).split(',') | map('trim') | list) -%}
+{%- do used_sfp.extend((config.get('sfp', '') | string).split(',') | map('trim') | list) -%}
+{%- endfor -%}
+
+{# Get available PCI indices #}
+{%- set disk_free = get_available_pci(hw_config, 'disk', used_disk) -%}
+{%- set copper_free = get_available_pci(hw_config, 'copper', used_copper) -%}
+{%- set sfp_free = get_available_pci(hw_config, 'sfp', used_sfp) -%}
 
 {%- set updated_template = TEMPLATE.copy() -%}
-{%- do updated_template.nodes.update({
+{%- do updated_template.update({
+    'title': hypervisor,
     'description': update_description(
-        TEMPLATE.nodes.description,
+        TEMPLATE.description,
         cpu_free,
         mem_free,
         disk_free,
@@ -29,7 +66,7 @@
         sfp_free
     )
 }) -%}
-{%- do ANNOTATION.hypervisor.hosts.update({hypervisor: updated_template}) -%}
+{%- do ANNOTATION.hypervisor.hosts.update({hypervisor ~ 'VMs': updated_template}) -%}
 {%- endfor -%}
 {%- endfor -%}
 
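The free-resource math in the template above reduces to a few sums and set differences. The sketch below restates it in plain Python under the assumption that hardware carries cpu, memory, and per-device index maps, and that each VM's config uses the same field names as the VMs file; it illustrates the logic and is not code from this PR.

```python
def free_resources(hw_config: dict, vms: dict) -> dict:
    """Python restatement of the Jinja calculation (assumed field names)."""
    used_cpu = sum(int(vm["config"]["cpu"]) for vm in vms.values())
    used_memory = sum(int(vm["config"]["memory"]) for vm in vms.values())

    def used_indices(key: str) -> set:
        # Collect the comma separated PCI indices already claimed by VMs.
        taken = set()
        for vm in vms.values():
            value = str(vm.get("config", {}).get(key, ""))
            taken.update(part.strip() for part in value.split(",") if part.strip())
        return taken

    def available(key: str) -> str:
        # Indices present in the hardware map but not claimed by any VM.
        taken = used_indices(key)
        return ",".join(str(i) for i in hw_config.get(key, {}) if str(i) not in taken)

    return {
        "cpu_free": hw_config["cpu"] - used_cpu,
        "mem_free": hw_config["memory"] - used_memory,
        "disk_free": available("disk"),
        "copper_free": available("copper"),
        "sfp_free": available("sfp"),
    }

# Example with made-up hardware and one VM:
print(free_resources(
    {"cpu": 32, "memory": 256, "disk": {0: "nvme0"}, "copper": {1: "eno1"}, "sfp": {}},
    {"sensor01": {"config": {"cpu": 8, "memory": 16, "disk": "0", "copper": "", "sfp": ""}}},
))
```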