mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
Change info-level logging to debug; remove old reactors.
This commit is contained in:
@@ -1,22 +0,0 @@
|
|||||||
#!py
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import salt.client
|
|
||||||
local = salt.client.LocalClient()
|
|
||||||
|
|
||||||
def run():
    """Reactor entry point: apply the setup.virt.setHostname state to the
    minion that fired the triggering event.

    Uses the renderer-injected ``data`` dict (event payload) for the target
    minion id and the module-level ``local`` LocalClient to run the state.
    Always returns an empty dict, so no further reactor states are scheduled.
    """
    target = data['name']
    logging.error("setHostname reactor: start for: %s " % target)

    # Run the state apply synchronously and capture the job return for logging.
    result = local.cmd(target, 'state.apply', ['setup.virt.setHostname'])

    logging.error("setHostname reactor: return for %s: %s " % (target, result))
    logging.error("setHostname reactor: end for: %s " % target)

    return {}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
#!py
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import salt.client
|
|
||||||
local = salt.client.LocalClient()
|
|
||||||
|
|
||||||
def run():
    """Reactor entry point: apply the setup.virt.setSalt state to the
    minion that fired the triggering event.

    Uses the renderer-injected ``data`` dict (event payload) for the target
    minion id and the module-level ``local`` LocalClient to run the state.
    Always returns an empty dict, so no further reactor states are scheduled.
    """
    target = data['name']
    logging.error("setSalt reactor: start for: %s " % target)

    # Run the state apply synchronously and capture the job return for logging.
    result = local.cmd(target, 'state.apply', ['setup.virt.setSalt'])

    logging.error("setSalt reactor: return for: %s: %s " % (target, result))
    logging.error("setSalt reactor: end for: %s " % target)

    return {}
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
#!py
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import salt.client
|
|
||||||
local = salt.client.LocalClient()
|
|
||||||
|
|
||||||
def run():
    """Reactor entry point: apply the setup.virt.sominion state to the
    minion that fired the triggering event.

    Uses the renderer-injected ``data`` dict (event payload) for the target
    minion id and the module-level ``local`` LocalClient to run the state.
    Always returns an empty dict, so no further reactor states are scheduled.
    """
    target = data['name']
    logging.error("sominion reactor: start for: %s " % target)

    # Run the state apply synchronously; the return is not logged here
    # (unlike the setHostname/setSalt reactors).
    result = local.cmd(target, 'state.apply', ['setup.virt.sominion'])

    logging.error("sominion reactor: end for: %s " % target)

    return {}
|
|
||||||
@@ -1,74 +0,0 @@
|
|||||||
#!py
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import yaml
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
|
|
||||||
def run():
    """Reactor: return a purged VM's hardware to its hypervisor's pools.

    Triggered after a VM has been destroyed/purged. Loads the VM's pillar
    file (the hardware it had claimed) and the hypervisor's pillar file
    (the free/claimed inventory), moves the VM's PCI devices back into the
    'free' map, adds its cpu/memory back onto the free compute counts,
    rewrites the hypervisor pillar, and deletes the now-stale VM pillar
    files.

    Relies on the renderer-injected ``data`` dict (event payload).
    Returns an empty dict so no further reactor states are scheduled.
    """

    def release_compute(hw_type):
        # Add the VM's cpu/memory allocation back onto the hypervisor's
        # free count. hw_type is passed explicitly (instead of relying on
        # the enclosing loop variable) to match the claim_* helpers in the
        # virtUpdate reactor.
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') + vm_data.get(hw_type)})
        # Fixed log message: this reactor releases (the old message said
        # "virtReboot reactor: claiming").
        logging.error("virtReleaseHardware reactor: released %s compute: %s " % (hw_type, compute))

    def release_pci(hw_type):
        # Move each PCI device the VM had claimed back into the 'free' map.
        free_hw = hv_data['hypervisor']['hardware'][hw_type]['free']
        # this could be 0 if nothing is assigned
        if vm_data[hw_type] != 0:
            for hw in vm_data[hw_type]:
                f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
                free_hw.update(f_hw)
                logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))

    def get_hypervisor():
        # The VM is already destroyed, so its hypervisor_host grain is not
        # available; find the hypervisor by locating the VM's pillar file.
        base_dir = '/opt/so/saltstack/local/pillar/hypervisor'
        pattern = os.path.join(base_dir, '**', vm_name + '.sls')
        files = glob.glob(pattern, recursive=True)
        logging.error("virtReleaseHardware reactor: files: %s " % files)
        if files:
            # Path component 7 is the hypervisor directory name:
            # /opt/so/saltstack/local/pillar/hypervisor/<hv_name>/<vm>.sls
            return files[0].split('/')[7]
        return None

    vm_name = data['name']
    # since the vm has been destroyed, we can't get the hypervisor_host grain
    hv_name = get_hypervisor()
    logging.error("virtReleaseHardware reactor: hv_name: %s " % hv_name)
    if hv_name is None:
        # Previously this fell through and crashed with a TypeError while
        # building the pillar path; with no pillar file there is nothing
        # claimed and nothing to release.
        logging.error("virtReleaseHardware reactor: no pillar file found for %s " % vm_name)
        return {}

    # Load the VM's pillar: the hardware it had claimed.
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: vm_data %s " % vm_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    # Load the hypervisor's pillar: the free/claimed hardware inventory.
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: hv_data: %s " % hv_data)
        except yaml.YAMLError as exc:
            logging.error(exc)

    for hw_type in ['disks', 'copper', 'sfp']:
        release_pci(hw_type)

    for hw_type in ['cpu', 'memory']:
        release_compute(hw_type)

    # update the free hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)

    # remove the old vm_data file since the vm has been purged
    os.remove("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls")
    # remove minion pillar files
    os.remove("/opt/so/saltstack/local/pillar/minions/adv_" + vm_name + ".sls")
    os.remove("/opt/so/saltstack/local/pillar/minions/" + vm_name + ".sls")

    return {}
|
|
||||||
@@ -1,125 +0,0 @@
|
|||||||
#!py
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import salt.client
|
|
||||||
local = salt.client.LocalClient()
|
|
||||||
import yaml
|
|
||||||
from time import sleep
|
|
||||||
|
|
||||||
def run():
    """Reactor: re-apply a changed hardware definition to an existing VM.

    Loads the VM's pillar (requested hardware) and the hypervisor's pillar
    (free/claimed inventory), shuts the VM down, claims the requested PCI
    devices and compute from the hypervisor's free pools, persists both
    pillar files, then calls virt.update with the new cpu/memory/devices
    and restarts the VM. Relies on the renderer-injected ``data`` dict and
    the module-level ``local`` LocalClient. Returns an empty dict (no
    further reactor states), including on timeout, which leaves the VM
    shut down or partially updated.
    """

    def claim_compute(hw_type):
        # Subtract the VM's requested cpu/memory from the hypervisor's
        # free compute count (mutates hv_data in place).
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') - vm_data.get(hw_type)})
        logging.error("virtUpdate reactor: claiming %s compute: %s " % (hw_type,compute))

    def claim_pci(hw_type):
        # Move PCI devices (disks/NICs) from the hypervisor's 'free' map
        # into its 'claimed' map and collect their pci ids in host_devices
        # for the later virt.update call. Mutates hv_data and vm_data.
        claimed_hw = hv_data['hypervisor']['hardware'][hw_type]['claimed']
        # if a list of devices was defined
        if type(vm_data[hw_type]) == list:
            for hw in vm_data[hw_type]:
                try:
                    c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
                    claimed_hw.update(c_hw)
                    host_devices.append(c_hw[hw])
                except KeyError:
                    # Requested device id is not in the free pool.
                    logging.error("virtUpdate reactor: could not claim %s with key %s " % (hw_type,hw))
                    # NOTE(review): this return value is discarded by the
                    # caller; it only aborts claiming the remaining devices
                    # of this hw_type.
                    return {'key1': 'val1'}
        # if a number of devices was defined
        else:
            n = vm_data[hw_type]
            # Replace the count with a list of the ids actually claimed.
            vm_data[hw_type] = []
            # grab the first number of devices as defined for the node type
            claiming_hw = list(hv_data['hypervisor']['hardware'][hw_type]['free'].items())[:n]
            logging.error("virtUpdate reactor: claiming %s hardware: %s " % (hw_type,claiming_hw))
            # claiming_hw is a list of tuples containing (numerical_id, pci_id)
            # claiming_hw example: [(1, 'pci_0000_c4_00_0'), (2, 'pci_0000_c4_00_1')]
            for hw in claiming_hw:
                c_hw = {hw[0]: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw[0])}
                claimed_hw.update(c_hw)
                vm_data[hw_type].append(hw[0])
                host_devices.append(hw[1])
        logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)

    # For this event the VM's minion id arrives in data['id'].
    vm_name = data['id']
    logging.error("virtUpdate reactor: vm_name: %s " % vm_name)
    # The VM is still running at this point, so its hypervisor_host grain
    # is reachable (unlike in the release-hardware reactor).
    hv_name = local.cmd(vm_name, 'grains.get', ['hypervisor_host']).get(vm_name)
    logging.error("virtUpdate reactor: hv_name: %s " % hv_name)

    # PCI ids accumulated by claim_pci, passed to virt.update below.
    host_devices = []

    # Load the VM's pillar: the requested hardware definition.
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data=yaml.safe_load(f)
            logging.error("virtUpdate reactor: vm_data %s " % vm_data)
            #logging.error(yaml.safe_load(f))
        except yaml.YAMLError as exc:
            # NOTE(review): on a parse error vm_data stays undefined and the
            # code below raises NameError — confirm this is acceptable.
            logging.error(exc)

    # Load the hypervisor's pillar: the free/claimed hardware inventory.
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data=yaml.safe_load(f)
            logging.error("virtUpdate reactor: hv_data: %s " % hv_data)
            #logging.error(yaml.safe_load(f))
        except yaml.YAMLError as exc:
            logging.error(exc)

    # NOTE(review): the argument is spelled 'vm_=' here but 'name=' for
    # virt.update/virt.start below — confirm this matches the virt module
    # signatures in the deployed Salt version.
    r = local.cmd(hv_name, 'virt.shutdown', ['vm_=' + vm_name])
    logging.error("virtUpdate reactor: virt.shutdown: %s return: %s " % (vm_name,r))

    # Retry the shutdown for up to ~60s until the hypervisor reports success.
    c = 0
    while True:
        if c == 60:
            logging.error("virtUpdate reactor: vm_name: %s failed virt.shutdown in time " % vm_name)
            # Give up without claiming hardware or updating the VM.
            return {}
        r = local.cmd(hv_name, 'virt.shutdown', ['vm_=' + vm_name])
        logging.error("virtUpdate reactor: virt.shutdown: %s return: %s " % (vm_name,r))
        if r.get(hv_name):
            break
        c += 1
        sleep(1)

    # Wait up to ~60s for the VM to actually be listed as inactive.
    c = 0
    while True:
        if c == 60:
            logging.error("virtUpdate reactor: vm_name: %s failed to go inactive in time " % vm_name)
            return {}
        r = local.cmd(hv_name, 'virt.list_inactive_vms')
        logging.error("virtUpdate reactor: virt.list_inactive_vms: %s " % r.get(hv_name))
        # NOTE(review): r.get(hv_name) is None if the minion did not answer,
        # which makes this 'in' test raise TypeError — confirm acceptable.
        if vm_name in r.get(hv_name):
            break
        c += 1
        sleep(1)

    # Claim PCI-passthrough devices first, then compute.
    for hw_type in ['disks', 'copper', 'sfp']:
        claim_pci(hw_type)

    for hw_type in ['cpu', 'memory']:
        claim_compute(hw_type)

    logging.error("virtUpdate reactor: host_devices: %s " % host_devices)

    # update the claimed hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)

    # since the original hw request provided was a count of hw instead of specific pci ids
    # we need to update the vm_data file with the assigned pci ids that were claimed
    # update the vm_data file with the hardware it claimed
    logging.error("virtUpdate reactor: new vm_data: %s " % vm_data)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls", 'w') as f:
        yaml.dump(vm_data, f, default_flow_style=False)

    # Pillar 'memory' is multiplied by 1024 before virt.update — presumably
    # a GiB -> MiB conversion; TODO confirm the units virt.update expects.
    mem = vm_data['memory'] * 1024
    r = local.cmd(hv_name, 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
    logging.error("virtUpdate reactor: virt.update: vm_name: %s return: %s" % (vm_name,r))

    # Bring the VM back up with the new definition.
    r = local.cmd(hv_name, 'virt.start', ['name=' + vm_name])
    logging.error("virtUpdate reactor: virt.start: vm_name: %s return: %s" % (vm_name,r))

    return {}
|
|
||||||
@@ -316,7 +316,7 @@ def start(interval: int = DEFAULT_INTERVAL,
|
|||||||
interval: Time in seconds between engine runs (managed by salt-master)
|
interval: Time in seconds between engine runs (managed by salt-master)
|
||||||
base_path: Base path containing hypervisor configurations
|
base_path: Base path containing hypervisor configurations
|
||||||
"""
|
"""
|
||||||
log.info("Starting virtual power manager engine")
|
log.debug("Starting virtual power manager engine")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Process each hypervisor directory
|
# Process each hypervisor directory
|
||||||
@@ -324,7 +324,7 @@ def start(interval: int = DEFAULT_INTERVAL,
|
|||||||
if os.path.isdir(hypervisor_path):
|
if os.path.isdir(hypervisor_path):
|
||||||
process_hypervisor_power_requests(hypervisor_path)
|
process_hypervisor_power_requests(hypervisor_path)
|
||||||
|
|
||||||
log.info("Virtual power manager completed successfully")
|
log.debug("Virtual power manager completed successfully")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log.error("Error in virtual power manager: %s", str(e))
|
log.error("Error in virtual power manager: %s", str(e))
|
||||||
|
|||||||
Reference in New Issue
Block a user