commit 205560cc95 (parent 7698243caf)
Author: m0duspwnens
Date:   2024-08-20 08:31:46 -04:00

9 changed files with 96 additions and 85 deletions

View File

View File

@@ -11,8 +11,8 @@ import yaml

 def run():
     minionid = data['id']
-    hv_name = 'jppvirt'
     DATA = data['data']
+    hv_name = DATA['HYPERVISOR_HOST']
     logging.error("setup reactor: %s " % DATA)
     vm_out_data = {
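
This hunk replaces the hard-coded hypervisor name with the HYPERVISOR_HOST field delivered in the event payload. For reference, Salt injects the triggering event into a '#!py' reactor as `data`; a minimal standalone sketch of that flow, with `data` stubbed in and its values hypothetical:

    # Minimal sketch of the pattern above: a '#!py' reactor reads the payload
    # that the minion sent via event.send. In a real reactor Salt injects
    # 'data' at render time; it is stubbed here with hypothetical values.
    import logging

    data = {
        'id': 'sonode01_sensor',
        'data': {'HYPERVISOR_HOST': 'hv01_hypervisor', 'MAINIP': '10.0.0.5'},
    }

    def run():
        minionid = data['id']
        DATA = data['data']
        hv_name = DATA['HYPERVISOR_HOST']  # e.g. 'hv01_hypervisor'
        logging.error("setup reactor: %s " % DATA)
        return {}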

View File

@@ -1,32 +1,43 @@
 #!py
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 import logging
 import yaml
 import os
+import glob

 def run():
-    def release_compute(hw_type):
+    def release_compute():
         compute = hv_data['hypervisor']['hardware'][hw_type]
         compute.update({'free': compute.get('free') + vm_data.get(hw_type)})
         logging.error("virtReboot reactor: claiming %s compute: %s " % (hw_type,compute))

-    def release_pci(hw_type):
+    def release_pci():
         free_hw = hv_data['hypervisor']['hardware'][hw_type]['free']
-        for hw in vm_data[hw_type]:
-            f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
-            free_hw.update(f_hw)
-            logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))
+        # this could be 0 if nothing is assigned
+        if vm_data[hw_type] != 0:
+            for hw in vm_data[hw_type]:
+                f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
+                free_hw.update(f_hw)
+                logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))

+    def get_hypervisor():
+        base_dir = '/opt/so/saltstack/local/pillar/hypervisor'
+        pattern = os.path.join(base_dir, '**', vm_name + '.sls')
+        files = glob.glob(pattern, recursive=True)
+        logging.error("virtReleaseHardware reactor: files: %s " % files)
+        if files:
+            return files[0].split('/')[7]

     vm_name = data['name']
-    hv_name = 'jppvirt'
+    # since the vm has been destroyed, we can't get the hypervisor_host grain
+    hv_name = get_hypervisor()
+    logging.error("virtReleaseHardware reactor: hv_name: %s " % hv_name)

     with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
         try:
@@ -45,10 +56,10 @@ def run():
             logging.error(exc)
     for hw_type in ['disks', 'copper', 'sfp']:
-        release_pci(hw_type)
+        release_pci()
     for hw_type in ['cpu', 'memory']:
-        release_compute(hw_type)
+        release_compute()
     # update the free hardware for the hypervisor
     with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
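
Because this reactor runs after the VM has been destroyed, its hypervisor_host grain can no longer be queried, so get_hypervisor() recovers the hypervisor from where the VM's pillar file lives. A standalone sketch of that lookup; the VM name and the resulting path are hypothetical:

    import glob
    import os

    base_dir = '/opt/so/saltstack/local/pillar/hypervisor'
    vm_name = 'sonode01_sensor'  # hypothetical VM/minion name

    # '**' with recursive=True matches the .sls file at any depth below base_dir
    pattern = os.path.join(base_dir, '**', vm_name + '.sls')
    files = glob.glob(pattern, recursive=True)
    # e.g. ['/opt/so/saltstack/local/pillar/hypervisor/hv01_hypervisor/sonode01_sensor.sls']
    # split('/') -> ['', 'opt', 'so', 'saltstack', 'local', 'pillar',
    #                'hypervisor', 'hv01_hypervisor', 'sonode01_sensor.sls']
    # so index 7 is the '<host>_<role>' directory holding the VM's pillar
    if files:
        hv_name = files[0].split('/')[7]  # -> 'hv01_hypervisor'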

View File

@@ -23,10 +23,13 @@ def run():
         # if a list of devices was defined
         if type(vm_data[hw_type]) == list:
             for hw in vm_data[hw_type]:
-                c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
-                claimed_hw.update(c_hw)
-                host_devices.append(c_hw[hw])
-                #hv_data['hypervisor']['hardware'][hw_type].update({'claimed': claimed_hw})
+                try:
+                    c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
+                    claimed_hw.update(c_hw)
+                    host_devices.append(c_hw[hw])
+                except KeyError:
+                    logging.error("virtUpdate reactor: could not claim %s with key %s " % (hw_type,hw))
+                    return {'key1': 'val1'}
         # if a number of devices was defined
         else:
             n = vm_data[hw_type]
@@ -44,7 +47,8 @@ def run():
logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw) logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)
vm_name = data['name'] vm_name = data['name']
hv_name = 'jppvirt' hv_name = local.cmd(vm_name, 'grains.get', ['hypervisor_host'])
host_devices = [] host_devices = []
with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f: with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
@@ -63,7 +67,7 @@ def run():
         except yaml.YAMLError as exc:
             logging.error(exc)
-    local.cmd('jppvirt', 'virt.stop', ['name=' + vm_name])
+    local.cmd(hv_name, 'virt.stop', ['name=' + vm_name])
     for hw_type in ['disks', 'copper', 'sfp']:
         claim_pci(hw_type)
@@ -85,9 +89,9 @@ def run():
     yaml.dump(vm_data, f, default_flow_style=False)
     mem = vm_data['memory'] * 1024
-    r = local.cmd('jppvirt', 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
+    r = local.cmd(hv_name, 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
     logging.error("virtUpdate reactor: virt.update: %s" % r)
-    local.cmd('jppvirt', 'virt.start', ['name=' + vm_name])
+    local.cmd(hv_name, 'virt.start', ['name=' + vm_name])
     return {}
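
The try/except added around the claim matters because dict.pop() on the 'free' map raises KeyError when a requested device isn't available; the reactor now logs the failed claim and returns early instead of dying mid-update. A toy sketch of the claim pattern, with a hypothetical hardware inventory:

    import logging

    # hypothetical inventory in the shape hv_data['hypervisor']['hardware'] uses
    hardware = {
        'sfp': {
            'free': {'0000:3b:00.0': 'X710 port 0'},
            'claimed': {},
        },
    }

    def claim(hw_type, hw):
        try:
            dev = hardware[hw_type]['free'].pop(hw)  # KeyError if not free
            hardware[hw_type]['claimed'][hw] = dev
            return dev
        except KeyError:
            logging.error("could not claim %s with key %s" % (hw_type, hw))
            return None

    claim('sfp', '0000:3b:00.0')  # moves the device from 'free' to 'claimed'
    claim('sfp', '0000:3b:00.1')  # logs an error and returns None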

View File

@@ -1,59 +1,14 @@
-searchnode:
-  provider: local-kvm
-  base_domain: jppol9vm
-  ip_source: qemu-agent
-  ssh_username: jpatterson
-  private_key: /home/jpatterson/.ssh/id_rsa
-  sudo: True
-  # /tmp is mounted noexec.. do workaround
-  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
-  script_args: -F -x python3 stable 3006.1
-  # grains to add to the minion
-  #grains:
-  #  clones-are-awesome: true
-  # override minion settings
-  minion:
-    master: jppvirt
-    master_port: 4506
-    startup_states: sls
-    sls_list:
-      - setHostname
-sensor:
-  provider: local-kvm
-  base_domain: jppol9vm
-  ip_source: qemu-agent
-  ssh_username: jpatterson
-  private_key: /home/jpatterson/.ssh/id_rsa
-  sudo: True
-  #preflight_cmds:
-  #  - echo "do something"
-  #  - hostname
-  # /tmp is mounted noexec.. do workaround
-  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
-  script_args: -F -x python3 stable 3006.1
-  # the destination directory will be created if it doesn't exist
-  #file_map:
-  #  /srv/salt/filemap.txt: /remote/path/to/use/custom/filemap.txt
-  #inline_script:
-  #  - echo "SLEEPING"
-  #  - hostname
-  # grains to add to the minion
-  #grains:
-  #  clones-are-awesome: true
-  # override minion settings
-  minion:
-    master: jppvirt
-    master_port: 4506
-    startup_states: sls
-    sls_list:
-      - setHostname
-{%- for host in HYPERVISORS %}
+{#- Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+https://securityonion.net/license; you may not use this file except in compliance with the
+Elastic License 2.0. #}
+{%- for role, hosts in HYPERVISORS.items() %}
+{%- for host in hosts.keys() -%}
 core-{{host}}:
   provider: kvm-ssh-{{host}}
-  base_domain: jppol9vm
+  base_domain: coreol9
   ip_source: qemu-agent
   ssh_username: soqemussh
   private_key: /home/soqemussh/.ssh/id_ed25519
@@ -61,10 +16,28 @@ core-{{host}}:
   deploy_command: sh /tmp/.saltcloud-*/deploy.sh
   script_args: -F -x python3 stable 3006.1
   minion:
-    master: jpp90man
+    master: {{ grains.host }}
     master_port: 4506
     startup_states: sls
     sls_list:
-      - setHostname
+      - setup.virt.setHostname
+    use_superseded:
+      - module.run
+    features:
+      x509_v2: true
+    log_level: info
+    log_level_logfile: info
+    log_file: /opt/so/log/salt/minion
+    grains:
+      hypervisor_host: {{host ~ "_" ~ role}}
+  #preflight_cmds:
+  #  - echo "preflight_cmds"
+  # the destination directory will be created if it doesn't exist
+  file_map:
+    /opt/so/saltstack/default/salt/repo/client/files/oracle/keys/securityonion.pub: /tmp/securityonion.pub
+  inline_script:
+    - "rpm --import /tmp/securityonion.pub"
+  # grains to add to the minion
+{%- endfor %}
 {%- endfor %}
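
The profile template now iterates a nested role-to-hosts mapping and stamps each VM with a hypervisor_host grain of the form host_role, which the reactors above consume. A sketch of the assumed pillar shape and the names the loops produce; the role and host names are hypothetical:

    # assumed shape of the hypervisor:nodes pillar, inferred from the loops
    HYPERVISORS = {
        'hypervisor': {
            'hv01': {},
            'hv02': {},
        },
    }

    for role, hosts in HYPERVISORS.items():
        for host in hosts.keys():
            print('core-%s' % host)        # cloud profile name, e.g. core-hv01
            print('%s_%s' % (host, role))  # hypervisor_host grain, e.g. hv01_hypervisor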

View File

@@ -1,15 +1,22 @@
-# Set up a provider with qemu+ssh protocol
-{%- for host in HYPERVISORS %}
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+https://securityonion.net/license; you may not use this file except in compliance with the
+Elastic License 2.0. #}
+{#- provider with qemu+ssh protocol #}
+{%- for role, hosts in HYPERVISORS.items() %}
+{%- for host in hosts.keys() %}
 kvm-ssh-{{host}}:
   driver: libvirt
   url: qemu+ssh://soqemussh@{{host}}/system?socket=/var/run/libvirt/libvirt-sock
+{%- endfor %}
 {%- endfor %}
-# Or connect to a local libvirt instance
+{#- local libvirt instance #}
 #local-kvm:
 #  driver: libvirt
 #  url: qemu:///system
-# work around flag for XML validation errors while cloning
+{#- work around flag for XML validation errors while cloning #}
 #  validate_xml: no
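
The provider template walks the same nested mapping to emit one qemu+ssh provider per host. A quick way to sanity-check the rendering with jinja2, under the same assumed pillar shape:

    import textwrap
    from jinja2 import Template

    TEMPLATE = textwrap.dedent("""\
        {%- for role, hosts in HYPERVISORS.items() %}
        {%- for host in hosts.keys() %}
        kvm-ssh-{{host}}:
          driver: libvirt
          url: qemu+ssh://soqemussh@{{host}}/system?socket=/var/run/libvirt/libvirt-sock
        {%- endfor %}
        {%- endfor %}
        """)

    # sample pillar is hypothetical
    print(Template(TEMPLATE).render(HYPERVISORS={'hypervisor': {'hv01': {}}}))
    # prints roughly:
    # kvm-ssh-hv01:
    #   driver: libvirt
    #   url: qemu+ssh://soqemussh@hv01/system?socket=/var/run/libvirt/libvirt-sock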

View File

@@ -6,6 +6,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 {% from 'salt/map.jinja' import SALTVERSION %}
+{% set HYPERVISORS = salt['pillar.get']('hypervisor:nodes', {} ) %}

 include:
   - libvirt.packages
@@ -20,7 +21,7 @@ cloud_providers:
     - name: /etc/salt/cloud.providers.d/libvirt.conf
     - source: salt://salt/cloud/cloud.providers.d/libvirt.conf.jinja
     - defaults:
-        HYPERVISORS: {{pillar.hypervisor.nodes.hypervisor}}
+        HYPERVISORS: {{HYPERVISORS}}
     - template: jinja

 cloud_profiles:
@@ -28,9 +29,19 @@ cloud_profiles:
     - name: /etc/salt/cloud.profiles.d/socloud.conf
     - source: salt://salt/cloud/cloud.profiles.d/socloud.conf.jinja
     - defaults:
-        HYPERVISORS: {{pillar.hypervisor.nodes.hypervisor}}
+        HYPERVISORS: {{HYPERVISORS}}
     - template: jinja

+{% for role, hosts in HYPERVISORS.items() %}
+{% for host in hosts.keys() %}
+hypervisor_{{host}}_{{role}}_pillar_dir:
+  file.directory:
+    - name: /opt/so/saltstack/local/pillar/hypervisor/{{host}}_{{role}}
+{% endfor %}
+{% endfor %}
 {% else %}
 {{sls}}_state_not_allowed:
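
The new loop pre-creates one pillar directory per host_role pair so the reactors above have a place to read and write per-VM pillar files. Roughly the same thing, imperatively; the sample pillar content is hypothetical:

    import os

    # assumed value of the hypervisor:nodes pillar
    hypervisor_nodes = {'hypervisor': {'hv01': {}, 'hv02': {}}}
    base = '/opt/so/saltstack/local/pillar/hypervisor'

    for role, hosts in hypervisor_nodes.items():
        for host in hosts.keys():
            os.makedirs(os.path.join(base, '%s_%s' % (host, role)), exist_ok=True)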

View File

@@ -3,7 +3,7 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{% from 'soinstall.map.jinja' import DATA %}
+{% from 'setup/virt/soinstall.map.jinja' import DATA %}

 setHostname_{{grains.id.split("_") | first}}:
   network.system:
@@ -16,6 +16,7 @@ create_pillar:
   event.send:
     - name: setup/so-minion
     - data:
+        HYPERVISOR_HOST: {{ grains.hypervisor_host }}
         MAINIP: {{ DATA.MAINIP }}
         MNIC: {{ DATA.MNIC }}
         NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
@@ -35,6 +36,10 @@ create_pillar:
         COPPER: {{ DATA.COPPER }}
         SFP: {{ DATA.SFP }}

+set_role_grain:
+  grains.present:
+    - name: role
+    - value: so-{{ grains.id.split("_") | last }}

 # set event for firewall rules - so-firewall-minion
@@ -47,7 +52,7 @@ clean_sls_list:
 clean_setHostname:
   file.line:
     - name: /etc/salt/minion
-    - match: '- setHostname'
+    - match: '- setup.virt.setHostname'
     - mode: delete
     - onchanges:
       - file: clean_sls_list
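
Both this state and the map file below lean on the minion id convention <hostname>_<role>: the first segment becomes the hostname and the last becomes the role grain, prefixed with so-. In plain Python, with a hypothetical sample id:

    minion_id = 'sonode01_searchnode'  # hypothetical '<hostname>_<role>' id

    hostname = minion_id.split('_')[0]       # 'sonode01'       (split | first)
    role = 'so-' + minion_id.split('_')[-1]  # 'so-searchnode'  (split | last)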

View File

@@ -1,10 +1,10 @@
 {# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 https://securityonion.net/license; you may not use this file except in compliance with the
 Elastic License 2.0. #}

 {% set nodetype = grains.id.split("_") | last %}
-{% import_yaml nodetype ~ '.yaml' as DATA %}
+{% import_yaml 'setup/virt/' ~ nodetype ~ '.yaml' as DATA %}
 {% set total_mem = grains.mem_total %}
 {% do DATA.update({'MAINIP': grains.ip_interfaces.get(DATA.MNIC)[0]}) %}
@@ -12,7 +12,7 @@
 {% do DATA.update({'CPUCORES': grains.num_cpus}) %}

-{% if nodetype = "searchnode" %}
+{% if nodetype == "searchnode" %}
 {% do DATA.update({'LSHOSTNAME': grains.host}) %}