mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-09 02:32:46 +01:00
virt start
This commit is contained in:
26
salt/reactor/createEmptyPillar.sls
Normal file
26
salt/reactor/createEmptyPillar.sls
Normal file
@@ -0,0 +1,26 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import salt.client
|
||||
local = salt.client.LocalClient()
|
||||
from subprocess import call
|
||||
import yaml
|
||||
|
||||
import os
|
||||
|
||||
def run():
    """Reactor: create empty per-minion pillar files for a new VM.

    ``data`` is injected by the Salt reactor renderer and carries the
    triggering event; ``data['kwargs']['name']`` is the VM/minion name.
    Creates ``adv_<name>.sls`` and ``<name>.sls`` under the local minion
    pillar root when they do not already exist.

    Returns:
        dict: empty low-state (all work is done as a side effect).
    """
    vm_name = data['kwargs']['name']
    logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
    pillar_root = '/opt/so/saltstack/local/pillar/minions/'
    pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']
    for fname in pillar_files:
        # Touch-style creation: open(..., 'a') creates the file when it is
        # missing and is a harmless no-op when it exists. This avoids both
        # the exists()/mknod() race and os.mknod()'s platform/permission
        # quirks (the original raised FileExistsError if it lost the race).
        with open(pillar_root + fname, 'a'):
            pass
    return {}
|
||||
11
salt/reactor/deleteKey.sls
Normal file
11
salt/reactor/deleteKey.sls
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
# Reactor state: remove a minion's accepted key from the Salt master.
# ``data`` is the event payload injected by the reactor system;
# data['name'] is the minion id whose key is deleted via the wheel module.
remove_key:
  wheel.key.delete:
    - args:
      - match: {{ data['name'] }}

{# Log the deletion at render time so the action is traceable in the master log #}
{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}
|
||||
37
salt/reactor/setup.sls
Normal file
37
salt/reactor/setup.sls
Normal file
@@ -0,0 +1,37 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import salt.client
|
||||
local = salt.client.LocalClient()
|
||||
from subprocess import call
|
||||
import yaml
|
||||
|
||||
def run():
    """Reactor: record a new virtual node's hardware request and register it.

    ``data`` is injected by the Salt reactor renderer; ``data['id']`` is
    the minion id and ``data['data']`` carries the hardware request
    (CPU, MEMORY, DISKS, COPPER, SFP, NODETYPE, ...). The request is
    written to the hypervisor pillar tree, then ``so-minion`` is invoked
    to add the virtual node.

    Returns:
        dict: empty low-state (all work is done as a side effect).
    """
    import os  # function-scope: only needed for the environment copy below

    minionid = data['id']
    hv_name = 'jppvirt'  # NOTE(review): hypervisor name is hard-coded — confirm
    DATA = data['data']
    logging.error("setup reactor: %s " % DATA)

    vm_out_data = {
        'cpu': DATA['CPU'],
        'memory': DATA['MEMORY'],
        'disks': DATA['DISKS'],
        'copper': DATA['COPPER'],
        'sfp': DATA['SFP']
    }

    logging.error("setup reactor: vm_out_data: %s " % vm_out_data)

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + minionid + ".sls", 'w') as f:
        yaml.dump(vm_out_data, f, default_flow_style=False)

    # Run so-minion without a shell: an argument list is immune to the
    # injection/quoting problems of the original shell=True string build
    # (NODE_DESCRIPTION in particular may contain arbitrary text).
    # NODETYPE was a shell-style "VAR=value cmd" prefix; pass it via env=.
    env = dict(os.environ, NODETYPE=DATA['NODETYPE'])
    rc = call(
        [
            '/usr/sbin/so-minion',
            '-o=addVirt',
            '-m=' + minionid,
            '-n=' + DATA['MNIC'],
            '-i=' + DATA['MAINIP'],
            '-a=' + DATA['INTERFACE'],
            '-c=' + str(DATA['CORECOUNT']),
            '-d=' + DATA['NODE_DESCRIPTION'],
        ],
        env=env,
    )

    logging.error('setup_reactor: rc: %s' % rc)

    return {}
|
||||
67
salt/reactor/virtReleaseHardware.sls
Normal file
67
salt/reactor/virtReleaseHardware.sls
Normal file
@@ -0,0 +1,67 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import salt.client
|
||||
local = salt.client.LocalClient()
|
||||
from subprocess import call
|
||||
import yaml
|
||||
|
||||
import os
|
||||
|
||||
def run():
    """Reactor: return a purged VM's hardware to the hypervisor's free pool.

    ``data`` is injected by the Salt reactor renderer; ``data['name']`` is
    the VM name. Reads the VM's claimed-hardware pillar and the hypervisor
    pillar, moves PCI devices from 'claimed' back to 'free', adds CPU and
    memory back onto the free counts, rewrites the hypervisor pillar, and
    deletes the now-stale VM and minion pillar files.

    Returns:
        dict: empty low-state (all work is done as a side effect).
    """

    def release_compute(hw_type):
        # Add the VM's cpu/memory amount back onto the hypervisor free pool.
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') + vm_data.get(hw_type)})
        # Fixed log text: original said "virtReboot reactor: claiming ...",
        # which named the wrong reactor and the wrong action.
        logging.error("virtReleaseHardware reactor: released %s compute: %s " % (hw_type, compute))

    def release_pci(hw_type):
        # Move each of the VM's devices from the 'claimed' map back to 'free'.
        free_hw = hv_data['hypervisor']['hardware'][hw_type]['free']
        for hw in vm_data[hw_type]:
            f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
            free_hw.update(f_hw)
            logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))

    vm_name = data['name']
    hv_name = 'jppvirt'  # NOTE(review): hypervisor name is hard-coded — confirm

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: vm_data %s " % vm_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: hv_data: %s " % hv_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    for hw_type in ['disks', 'copper', 'sfp']:
        release_pci(hw_type)

    for hw_type in ['cpu', 'memory']:
        release_compute(hw_type)

    # update the free hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)

    # remove the old vm_data file since the vm has been purged
    os.remove("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls")
    # remove minion pillar files
    os.remove("/opt/so/saltstack/local/pillar/minions/adv_" + vm_name + ".sls")
    os.remove("/opt/so/saltstack/local/pillar/minions/" + vm_name + ".sls")

    return {}
|
||||
95
salt/reactor/virtUpdate.sls
Normal file
95
salt/reactor/virtUpdate.sls
Normal file
@@ -0,0 +1,95 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import salt.client
|
||||
local = salt.client.LocalClient()
|
||||
from subprocess import call
|
||||
import yaml
|
||||
|
||||
|
||||
def run():
    """Reactor: claim hypervisor hardware for a VM and apply it via virt.update.

    ``data`` is injected by the Salt reactor renderer; ``data['name']`` is
    the VM name. Reads the VM's requested-hardware pillar and the
    hypervisor pillar, moves PCI devices from 'free' to 'claimed'
    (resolving count-style requests into concrete device ids), subtracts
    CPU/memory from the free counts, persists both pillar files, then
    stops the VM, applies the new hardware with virt.update, and starts
    it again.

    Returns:
        dict: empty low-state (all work is done as a side effect).
    """

    def claim_compute(hw_type):
        # Subtract the VM's cpu/memory request from the hypervisor free pool.
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') - vm_data.get(hw_type)})
        logging.error("virtUpdate reactor: claiming %s compute: %s " % (hw_type, compute))

    def claim_pci(hw_type):
        claimed_hw = hv_data['hypervisor']['hardware'][hw_type]['claimed']
        # if a list of devices was defined
        if isinstance(vm_data[hw_type], list):  # isinstance over type()==
            for hw in vm_data[hw_type]:
                c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
                claimed_hw.update(c_hw)
                host_devices.append(c_hw[hw])
        # if a number of devices was defined
        else:
            n = vm_data[hw_type]
            vm_data[hw_type] = []
            # grab the first number of devices as defined for the node type
            claiming_hw = list(hv_data['hypervisor']['hardware'][hw_type]['free'].items())[:n]
            logging.error("virtUpdate reactor: claiming %s hardware: %s " % (hw_type, claiming_hw))
            # claiming_hw is a list of tuples containing (numerical_id, pci_id)
            # claiming_hw example: [(1, 'pci_0000_c4_00_0'), (2, 'pci_0000_c4_00_1')]
            for hw in claiming_hw:
                c_hw = {hw[0]: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw[0])}
                claimed_hw.update(c_hw)
                vm_data[hw_type].append(hw[0])
                host_devices.append(hw[1])
        logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)

    vm_name = data['name']
    hv_name = 'jppvirt'  # NOTE(review): hypervisor name is hard-coded — confirm
    host_devices = []

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data = yaml.safe_load(f)
            logging.error("virtUpdate reactor: vm_data %s " % vm_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data = yaml.safe_load(f)
            logging.error("virtUpdate reactor: hv_data: %s " % hv_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    # Consistency fix: target the hypervisor through hv_name (same value as
    # the old 'jppvirt' literals) so there is a single source of truth.
    local.cmd(hv_name, 'virt.stop', ['name=' + vm_name])

    for hw_type in ['disks', 'copper', 'sfp']:
        claim_pci(hw_type)

    for hw_type in ['cpu', 'memory']:
        claim_compute(hw_type)

    logging.error("virtUpdate reactor: host_devices: %s " % host_devices)

    # update the claimed hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)

    # since the original hw request provided was a count of hw instead of specific pci ids
    # we need to update the vm_data file with the assigned pci ids that were claimed
    # update the vm_data file with the hardware it claimed
    logging.error("virtUpdate reactor: new vm_data: %s " % vm_data)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls", 'w') as f:
        yaml.dump(vm_data, f, default_flow_style=False)

    # memory is scaled by 1024 before virt.update — presumably GiB -> MiB; confirm
    mem = vm_data['memory'] * 1024
    r = local.cmd(hv_name, 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
    logging.error("virtUpdate reactor: virt.update: %s" % r)

    local.cmd(hv_name, 'virt.start', ['name=' + vm_name])

    return {}
|
||||
Reference in New Issue
Block a user