m0duspwnens
2024-08-20 08:31:46 -04:00
parent 7698243caf
commit 205560cc95
9 changed files with 96 additions and 85 deletions

View File

@@ -11,8 +11,8 @@ import yaml
def run():
minionid = data['id']
hv_name = 'jppvirt'
DATA = data['data']
hv_name = DATA['HYPERVISOR_HOST']
logging.error("setup reactor: %s " % DATA)
vm_out_data = {
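
A minimal sketch of the change above, outside Salt: the setup reactor now reads the hypervisor name from the event payload instead of hardcoding 'jppvirt'. The payload contents below are invented for illustration; in a real reactor the `data` dict is injected by Salt.

import logging

def run():
    # In a Salt reactor `data` is supplied by the event system; stubbed here
    # so the sketch runs standalone.
    data = {'id': 'sensor01_sensor', 'data': {'HYPERVISOR_HOST': 'hv01_hypervisor'}}
    minionid = data['id']
    event_data = data['data']
    hv_name = event_data['HYPERVISOR_HOST']  # was: hv_name = 'jppvirt'
    logging.error("setup reactor: %s " % event_data)
    return {'minion': minionid, 'hypervisor': hv_name}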

View File

@@ -8,25 +8,36 @@
import logging
import yaml
import os
import glob
def run():
def release_compute(hw_type):
def release_compute():
compute = hv_data['hypervisor']['hardware'][hw_type]
compute.update({'free': compute.get('free') + vm_data.get(hw_type)})
logging.error("virtReboot reactor: claiming %s compute: %s " % (hw_type,compute))
def release_pci(hw_type):
def release_pci():
free_hw = hv_data['hypervisor']['hardware'][hw_type]['free']
for hw in vm_data[hw_type]:
f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
free_hw.update(f_hw)
logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))
# this could be 0 if nothing is assigned
if vm_data[hw_type] != 0:
for hw in vm_data[hw_type]:
f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
free_hw.update(f_hw)
logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))
def get_hypervisor():
base_dir = '/opt/so/saltstack/local/pillar/hypervisor'
pattern = os.path.join(base_dir, '**', vm_name + '.sls')
files = glob.glob(pattern, recursive=True)
logging.error("virtReleaseHardware reactor: files: %s " % files)
if files:
return files[0].split('/')[7]
vm_name = data['name']
hv_name = 'jppvirt'
# since the vm has been destroyed, we can't get the hypervisor_host grain
hv_name = get_hypervisor()
logging.error("virtReleaseHardware reactor: hv_name: %s " % hv_name)
with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
try:
@@ -45,10 +56,10 @@ def run():
logging.error(exc)
for hw_type in ['disks', 'copper', 'sfp']:
release_pci(hw_type)
release_pci()
for hw_type in ['cpu', 'memory']:
release_compute(hw_type)
release_compute()
# update the free hardware for the hypervisor
with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
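
A hedged sketch of the lookup added above: because the VM has already been destroyed, its hypervisor_host grain is gone, so the reactor globs the local pillar tree for the VM's pillar file and takes the hypervisor directory name from the matched path. The VM name below is a placeholder.

import glob
import os

def get_hypervisor(vm_name, base_dir='/opt/so/saltstack/local/pillar/hypervisor'):
    # Matches /opt/so/saltstack/local/pillar/hypervisor/<hv_name>/<vm_name>.sls
    pattern = os.path.join(base_dir, '**', vm_name + '.sls')
    files = glob.glob(pattern, recursive=True)
    if files:
        # path component 7 of the absolute path is the <hv_name> directory
        return files[0].split('/')[7]
    return None

print(get_hypervisor('sensor01_sensor'))  # None unless the pillar file exists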

View File

@@ -23,10 +23,13 @@ def run():
# if a list of devices was defined
if type(vm_data[hw_type]) == list:
for hw in vm_data[hw_type]:
c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
claimed_hw.update(c_hw)
host_devices.append(c_hw[hw])
#hv_data['hypervisor']['hardware'][hw_type].update({'claimed': claimed_hw})
try:
c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
claimed_hw.update(c_hw)
host_devices.append(c_hw[hw])
except KeyError:
logging.error("virtUpdate reactor: could not claim %s with key %s " % (hw_type,hw))
return {'key1': 'val1'}
# if a number of devices was defined
else:
n = vm_data[hw_type]
@@ -44,7 +47,8 @@ def run():
logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)
vm_name = data['name']
hv_name = 'jppvirt'
hv_name = local.cmd(vm_name, 'grains.get', ['hypervisor_host'])
host_devices = []
with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
@@ -63,7 +67,7 @@ def run():
except yaml.YAMLError as exc:
logging.error(exc)
local.cmd('jppvirt', 'virt.stop', ['name=' + vm_name])
local.cmd(hv_name, 'virt.stop', ['name=' + vm_name])
for hw_type in ['disks', 'copper', 'sfp']:
claim_pci(hw_type)
@@ -85,9 +89,9 @@ def run():
yaml.dump(vm_data, f, default_flow_style=False)
mem = vm_data['memory'] * 1024
r = local.cmd('jppvirt', 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
r = local.cmd(hv_name, 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
logging.error("virtUpdate reactor: virt.update: %s" % r)
local.cmd('jppvirt', 'virt.start', ['name=' + vm_name])
local.cmd(hv_name, 'virt.start', ['name=' + vm_name])
return {}
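
A rough sketch of the claim logic patched above: each requested device is moved from the hypervisor's 'free' pool into 'claimed', and a KeyError (device not present in the free pool) is now logged rather than crashing the reactor. The dict layout mirrors the pillar structure in the diff; the values are made up.

import logging

def claim_pci(hv_data, vm_data, hw_type):
    claimed_hw = {}
    host_devices = []
    for hw in vm_data[hw_type]:
        try:
            c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
            claimed_hw.update(c_hw)
            host_devices.append(c_hw[hw])
        except KeyError:
            logging.error("virtUpdate reactor: could not claim %s with key %s " % (hw_type, hw))
    return claimed_hw, host_devices

hv = {'hypervisor': {'hardware': {'sfp': {'free': {'0000:3b:00.0': {'bus': '0x3b'}}, 'claimed': {}}}}}
vm = {'sfp': ['0000:3b:00.0', '0000:3b:00.1']}  # second device is not free -> logged and skipped
print(claim_pci(hv, vm, 'sfp'))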

View File

@@ -1,59 +1,14 @@
searchnode:
provider: local-kvm
base_domain: jppol9vm
ip_source: qemu-agent
ssh_username: jpatterson
private_key: /home/jpatterson/.ssh/id_rsa
sudo: True
# /tmp is mounted noexec.. do workaround
deploy_command: sh /tmp/.saltcloud-*/deploy.sh
script_args: -F -x python3 stable 3006.1
# grains to add to the minion
#grains:
# clones-are-awesome: true
# override minion settings
minion:
master: jppvirt
master_port: 4506
startup_states: sls
sls_list:
- setHostname
{#- Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
sensor:
provider: local-kvm
base_domain: jppol9vm
ip_source: qemu-agent
ssh_username: jpatterson
private_key: /home/jpatterson/.ssh/id_rsa
sudo: True
#preflight_cmds:
# - echo "do something"
# - hostname
# /tmp is mounted noexec.. do workaround
deploy_command: sh /tmp/.saltcloud-*/deploy.sh
script_args: -F -x python3 stable 3006.1
# the destination directory will be created if it doesn't exist
#file_map:
# /srv/salt/filemap.txt: /remote/path/to/use/custom/filemap.txt
#inline_script:
# - echo "SLEEPING"
# - hostname
# grains to add to the minion
#grains:
# clones-are-awesome: true
# override minion settings
minion:
master: jppvirt
master_port: 4506
startup_states: sls
sls_list:
- setHostname
{%- for host in HYPERVISORS %}
{%- for role, hosts in HYPERVISORS.items() %}
{%- for host in hosts.keys() -%}
core-{{host}}:
provider: kvm-ssh-{{host}}
base_domain: jppol9vm
base_domain: coreol9
ip_source: qemu-agent
ssh_username: soqemussh
private_key: /home/soqemussh/.ssh/id_ed25519
@@ -61,10 +16,28 @@ core-{{host}}:
deploy_command: sh /tmp/.saltcloud-*/deploy.sh
script_args: -F -x python3 stable 3006.1
minion:
master: jpp90man
master: {{ grains.host }}
master_port: 4506
startup_states: sls
sls_list:
- setHostname
- setup.virt.setHostname
use_superseded:
- module.run
features:
x509_v2: true
log_level: info
log_level_logfile: info
log_file: /opt/so/log/salt/minion
grains:
hypervisor_host: {{host ~ "_" ~ role}}
#preflight_cmds:
# - echo "preflight_cmds"
# the destination directory will be created if it doesn't exist
file_map:
/opt/so/saltstack/default/salt/repo/client/files/oracle/keys/securityonion.pub: /tmp/securityonion.pub
inline_script:
- "rpm --import /tmp/securityonion.pub"
# grains to add to the minion
{%- endfor %}
{%- endfor %}
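
The profile template above now renders one core-<host> profile per entry in a role -> hosts mapping instead of the fixed searchnode/sensor profiles. The equivalent iteration in plain Python, with an assumed HYPERVISORS value, shows the profile names, provider references, and hypervisor_host grains it produces.

# HYPERVISORS stands in for the pillar value handed to the template; the hosts are invented.
HYPERVISORS = {'hypervisor': {'hv01': {}, 'hv02': {}}}

for role, hosts in HYPERVISORS.items():
    for host in hosts.keys():
        print('profile:  core-%s' % host)
        print('provider: kvm-ssh-%s' % host)
        print('grain:    hypervisor_host: %s_%s' % (host, role))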

View File

@@ -1,15 +1,22 @@
# Set up a provider with qemu+ssh protocol
{%- for host in HYPERVISORS %}
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{#- provider with qemu+ssh protocol #}
{%- for role, hosts in HYPERVISORS.items() %}
{%- for host in hosts.keys() %}
kvm-ssh-{{host}}:
driver: libvirt
url: qemu+ssh://soqemussh@{{host}}/system?socket=/var/run/libvirt/libvirt-sock
{%- endfor %}
{%- endfor %}
# Or connect to a local libvirt instance
{#- local libvirt instance #}
#local-kvm:
# driver: libvirt
# url: qemu:///system
# work around flag for XML validation errors while cloning
{#- work around flag for XML validation errors while cloning #}
# validate_xml: no
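
The provider template follows the same loop: each host gets a kvm-ssh-<host> provider whose key must match the provider: field emitted by the profile template above. A sketch of the URL it builds, with a placeholder host:

host = 'hv01'  # placeholder
provider = 'kvm-ssh-%s' % host
url = 'qemu+ssh://soqemussh@%s/system?socket=/var/run/libvirt/libvirt-sock' % host
print(provider, '->', url)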

View File

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}
include:
- libvirt.packages
@@ -20,7 +21,7 @@ cloud_providers:
- name: /etc/salt/cloud.providers.d/libvirt.conf
- source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
- defaults:
HYPERVISORS: {{pillar.hypervisor.nodes.hypervisor}}
HYPERVISORS: {{HYPERVISORS}}
- template: jinja
cloud_profiles:
@@ -28,9 +29,19 @@ cloud_profiles:
- name: /etc/salt/cloud.profiles.d/socloud.conf
- source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
- defaults:
HYPERVISORS: {{pillar.hypervisor.nodes.hypervisor}}
HYPERVISORS: {{HYPERVISORS}}
- template: jinja
{% for role, hosts in HYPERVISORS.items() %}
{% for host in hosts.keys() %}
hypervisor_{{host}}_{{role}}_pillar_dir:
file.directory:
- name: /opt/so/saltstack/local/pillar/hypervisor/{{host}}_{{role}}
{% endfor %}
{% endfor %}
{% else %}
{{sls}}_state_not_allowed:
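
The state above pulls hypervisor:nodes from pillar once, hands it to both cloud templates, and creates one local pillar directory per host/role pair; these are the directories the release reactor later globs for a VM's pillar file. A sketch of the paths it would create, assuming the same pillar shape:

import os

nodes = {'hypervisor': {'hv01': {}}}  # stands in for pillar hypervisor:nodes
for role, hosts in nodes.items():
    for host in hosts.keys():
        print(os.path.join('/opt/so/saltstack/local/pillar/hypervisor', '%s_%s' % (host, role)))
        # -> /opt/so/saltstack/local/pillar/hypervisor/hv01_hypervisor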

View File

@@ -3,7 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'soinstall.map.jinja' import DATA %}
{% from 'setup/virt/soinstall.map.jinja' import DATA %}
setHostname_{{grains.id.split("_") | first}}:
network.system:
@@ -16,6 +16,7 @@ create_pillar:
event.send:
- name: setup/so-minion
- data:
HYPERVISOR_HOST: {{ grains.hypervisor_host }}
MAINIP: {{ DATA.MAINIP }}
MNIC: {{ DATA.MNIC }}
NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
@@ -35,6 +36,10 @@ create_pillar:
COPPER: {{ DATA.COPPER }}
SFP: {{ DATA.SFP }}
set_role_grain:
grains.present:
- name: role
- value: so-{{ grains.id.split("_") | last }}
# set event for firewall rules - so-firewall-minion
@@ -47,7 +52,7 @@ clean_sls_list:
clean_setHostname:
file.line:
- name: /etc/salt/minion
- match: '- setHostname'
- match: '- setup.virt.setHostname'
- mode: delete
- onchanges:
- file: clean_sls_list
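
The state above splits the minion id to drive the hostname and the new role grain, and forwards the hypervisor_host grain in the setup event. A quick sketch of the naming convention, using a hypothetical minion id:

minion_id = 'sensor01_sensor'                    # hypothetical id of the form <name>_<role>
hostname = minion_id.split('_')[0]               # used by the setHostname_<first> state id
role_grain = 'so-' + minion_id.split('_')[-1]    # value written to the 'role' grain
print(hostname, role_grain)                      # sensor01 so-sensor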

View File

@@ -4,7 +4,7 @@
Elastic License 2.0. #}
{% set nodetype = grains.id.split("_") | last %}
{% import_yaml nodetype ~ '.yaml' as DATA %}
{% import_yaml 'setup/virt/' ~ nodetype ~ '.yaml' as DATA %}
{% set total_mem = grains.mem_total %}
{% do DATA.update({'MAINIP': grains.ip_interfaces.get(DATA.MNIC)[0]}) %}
@@ -12,7 +12,7 @@
{% do DATA.update({'CPUCORES': grains.num_cpus}) %}
{% if nodetype = "searchnode" %}
{% if nodetype == "searchnode" %}
{% do DATA.update({'LSHOSTNAME': grains.host}) %}
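
In Python terms, the map file above roughly performs the following grain-driven updates (values invented for illustration); note the nodetype comparison this commit fixes from = to ==:

grains = {
    'id': 'search01_searchnode',
    'mem_total': 16384,
    'num_cpus': 8,
    'host': 'search01',
    'ip_interfaces': {'eth0': ['10.0.0.5']},
}
DATA = {'MNIC': 'eth0'}                          # loaded from setup/virt/<nodetype>.yaml
nodetype = grains['id'].split('_')[-1]
total_mem = grains['mem_total']
DATA['MAINIP'] = grains['ip_interfaces'].get(DATA['MNIC'])[0]
DATA['CPUCORES'] = grains['num_cpus']
if nodetype == 'searchnode':                     # '==', not '='
    DATA['LSHOSTNAME'] = grains['host']
print(DATA)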