virt start

m0duspwnens
2024-07-31 15:19:22 -04:00
parent 8e4777a5ff
commit 810be2c9d2
16 changed files with 532 additions and 52 deletions


@@ -0,0 +1,61 @@
hypervisor:
  model1:
    hardware:
      cpu:
        total: 128
        free: 128
      memory:
        total: 128
        free: 128
      disks:
        free:
          3: pci_0000_c7_00_0
          4: pci_0000_c8_00_0
        claimed:
          1: pci_0000_c5_00_0
          2: pci_0000_c6_00_0
      copper:
        free:
          1: pci_0000_c4_00_0
          2: pci_0000_c4_00_1
          3: pci_0000_c4_00_2
          4: pci_0000_c4_00_3
        claimed: {}
      sfp:
        free:
          5: pci_0000_41_00_0
          6: pci_0000_41_00_1
        claimed: {}
  model2:
    hardware:
      cpu:
        total: 128
        free: 128
      memory:
        total: 512
        free: 512
      disks:
        free:
          3: pci_0000_c8_00_0
          4: pci_0000_c9_00_0
          5: pci_0000_c10_00_0
          6: pci_0000_c11_00_0
        claimed:
          1: pci_0000_c6_00_0
          2: pci_0000_c7_00_0
      copper:
        free:
          1: pci_0000_c4_00_0
          2: pci_0000_c4_00_1
          3: pci_0000_c4_00_2
          4: pci_0000_c4_00_3
          5: pci_0000_c5_00_0
          6: pci_0000_c5_00_1
          7: pci_0000_c5_00_2
          8: pci_0000_c5_00_3
        claimed: {}
      sfp:
        free:
          9: pci_0000_41_00_0
          10: pci_0000_41_00_1
        claimed: {}
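
This pillar tracks, per hypervisor model, total and free cpu/memory plus free vs. claimed PCI devices, each keyed by a numeric id mapped to a pci id. A minimal sketch of walking that structure — the path is an assumption based on the reactors later in this commit, and the model layer follows this file (the reactors address hardware directly under hypervisor):

import yaml

# Sketch only: path and model layout assumed from this commit's pillar files.
with open("/opt/so/saltstack/local/pillar/hypervisor/jppvirt/jppvirt.sls") as f:
    pillar = yaml.safe_load(f)

for model, spec in pillar["hypervisor"].items():
    hw = spec["hardware"]
    print("%s: cpu %s/%s free, memory %s/%s free" % (
        model, hw["cpu"]["free"], hw["cpu"]["total"],
        hw["memory"]["free"], hw["memory"]["total"]))
    for dev in ("disks", "copper", "sfp"):
        print("  %s free=%d claimed=%d" % (
            dev, len(hw[dev]["free"]), len(hw[dev]["claimed"])))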


@@ -1,51 +0,0 @@
listen_tls = 0
listen_tcp = 0
tls_port = "16514"
tcp_port = "16509"
listen_addr = "0.0.0.0"
unix_sock_group = "root"
unix_sock_ro_perms = "0777"
unix_sock_rw_perms = "0770"
unix_sock_admin_perms = "0700"
unix_sock_dir = "/run/libvirt"
auth_unix_ro = "none"
auth_unix_rw = "none"
auth_tcp = "none"
auth_tls = "none"
tcp_min_ssf = 112
access_drivers = ["nop"]
key_file = "/etc/pki/libvirt/private/serverkey.pem"
cert_file = "/etc/pki/libvirt/servercert.pem"
ca_file = "/etc/pki/CA/cacert.pem"
crl_file = "/etc/pki/CA/crl.pem"
tls_no_sanity_certificate = 0
tls_no_verify_certificate = 0
tls_allowed_dn_list = ["DN1", "DN2"]
tls_priority = "NORMAL"
sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM"]
max_clients = 5000
max_queued_clients = 1000
max_anonymous_clients = 20
min_workers = 5
max_workers = 20
prio_workers = 5
max_client_requests = 5
admin_min_workers = 1
admin_max_workers = 5
admin_max_clients = 5
admin_max_queued_clients = 5
admin_max_client_requests = 5
log_level = 3
log_filters = "1:qemu 1:libvirt 4:object 4:json 4:event 1:util"
log_outputs = "3:syslog:libvirtd"
audit_level = 2
audit_logging = 1
host_uuid = "00000000-0000-0000-0000-000000000000"
host_uuid_source = "smbios"
keepalive_interval = 5
keepalive_count = 5
keepalive_required = 1
admin_keepalive_required = 1
admin_keepalive_interval = 5
admin_keepalive_count = 5
ovs_timeout = 5


@@ -78,3 +78,6 @@ virbr0:
  - proto: dhcp
  - require:
    - network: ens18
# virtlogd service may not restart following reboot without this
#semanage permissive -a virtlogd_t


@@ -59,7 +59,11 @@ for i in "$@"; do
  -i=*|--ip=*)
    MAINIP="${i#*=}"
    shift
    ;;
  -c=*|--cpu=*)
    CORECOUNT="${i#*=}"
    shift
    ;;
  -*|--*)
    echo "Unknown option $i"
    exit 1
@@ -637,6 +641,10 @@ case "$OPERATION" in
    updateMineAndApplyStates
    ;;
  "addVirt")
    setupMinionFiles
    ;;
  "delete")
    deleteMinionFiles
    deleteMinion


@@ -0,0 +1,26 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import os

def run():
    #logging.error("createEmptyPillar reactor: data: %s" % data)
    vm_name = data['kwargs']['name']
    logging.error("createEmptyPillar reactor: vm_name: %s" % vm_name)
    pillar_root = '/opt/so/saltstack/local/pillar/minions/'
    pillar_files = ['adv_' + vm_name + '.sls', vm_name + '.sls']
    for f in pillar_files:
        if not os.path.exists(pillar_root + f):
            os.mknod(pillar_root + f)
    return {}
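
Salt's #!py renderer injects the triggering event into the module namespace as data before calling run(); data is not defined in the file itself. A hypothetical payload for this reactor, with the VM name illustrative:

# Hypothetical event payload shape for createEmptyPillar; only
# data['kwargs']['name'] is read by the reactor above.
data = {"kwargs": {"name": "searchnode1"}}
# run() would then create these files if they do not already exist:
#   /opt/so/saltstack/local/pillar/minions/adv_searchnode1.sls
#   /opt/so/saltstack/local/pillar/minions/searchnode1.sls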


@@ -0,0 +1,11 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
remove_key:
  wheel.key.delete:
    - args:
      - match: {{ data['name'] }}

{% do salt.log.info('deleteKey reactor: deleted minion key: %s' % data['name']) %}

salt/reactor/setup.sls Normal file

@@ -0,0 +1,37 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import yaml
from subprocess import call

def run():
    minionid = data['id']
    hv_name = 'jppvirt'
    DATA = data['data']
    logging.error("setup reactor: %s " % DATA)
    vm_out_data = {
        'cpu': DATA['CPU'],
        'memory': DATA['MEMORY'],
        'disks': DATA['DISKS'],
        'copper': DATA['COPPER'],
        'sfp': DATA['SFP']
    }
    logging.error("setup reactor: vm_out_data: %s " % vm_out_data)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + minionid + ".sls", 'w') as f:
        yaml.dump(vm_out_data, f, default_flow_style=False)
    rc = call("NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVirt -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -a=" + DATA['INTERFACE'] + " -c=" + str(DATA['CORECOUNT']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'", shell=True)
    logging.error('setup reactor: rc: %s' % rc)
    return {}
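
This reactor consumes the setup/so-minion event fired by the event.send call in setHostname.sls later in this commit; data['id'] is the new minion and data['data'] carries the node definition. A sketch of the fields the reactor actually reads, using the searchnode defaults from this commit (MAINIP and the minion id are illustrative; the full event also carries the remaining keys from setHostname.sls):

# Sketch of the 'setup/so-minion' event payload handled above.
data = {
    "id": "searchnode1",            # minion id of the new VM (illustrative)
    "data": {
        "MAINIP": "10.0.0.10",      # illustrative
        "MNIC": "eth0",
        "NODE_DESCRIPTION": "virt search",
        "INTERFACE": "bond0",
        "NODETYPE": "SEARCHNODE",
        "CORECOUNT": 16,
        "CPU": 16,
        "MEMORY": 32,
        "DISKS": 1,
        "COPPER": 0,
        "SFP": 0,
    },
}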


@@ -0,0 +1,67 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import os
import yaml

def run():
    def release_compute(hw_type):
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') + vm_data.get(hw_type)})
        logging.error("virtReleaseHardware reactor: releasing %s compute: %s " % (hw_type, compute))
    def release_pci(hw_type):
        free_hw = hv_data['hypervisor']['hardware'][hw_type]['free']
        for hw in vm_data[hw_type]:
            f_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['claimed'].pop(hw)}
            free_hw.update(f_hw)
            logging.error("virtReleaseHardware reactor: released %s: %s" % (hw_type, f_hw))
    vm_name = data['name']
    hv_name = 'jppvirt'
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: vm_data %s " % vm_data)
        except yaml.YAMLError as exc:
            logging.error(exc)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data = yaml.safe_load(f)
            logging.error("virtReleaseHardware reactor: hv_data: %s " % hv_data)
        except yaml.YAMLError as exc:
            logging.error(exc)
    for hw_type in ['disks', 'copper', 'sfp']:
        release_pci(hw_type)
    for hw_type in ['cpu', 'memory']:
        release_compute(hw_type)
    # update the free hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)
    # remove the old vm_data file since the vm has been purged
    os.remove("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls")
    # remove minion pillar files
    os.remove("/opt/so/saltstack/local/pillar/minions/adv_" + vm_name + ".sls")
    os.remove("/opt/so/saltstack/local/pillar/minions/" + vm_name + ".sls")
    return {}
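
A worked example of the release_pci bookkeeping above: each numeric id recorded in the destroyed VM's pillar is popped from the hypervisor's claimed map back into the free pool. The ids are illustrative, taken from the hypervisor pillar in this commit:

# Worked example of release_pci (not part of the commit).
free = {3: 'pci_0000_c7_00_0', 4: 'pci_0000_c8_00_0'}
claimed = {1: 'pci_0000_c5_00_0', 2: 'pci_0000_c6_00_0'}
for hw in [1, 2]:                      # vm_data['disks'] of the purged VM
    free[hw] = claimed.pop(hw)
# free    -> {3: ..., 4: ..., 1: 'pci_0000_c5_00_0', 2: 'pci_0000_c6_00_0'}
# claimed -> {}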


@@ -0,0 +1,95 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import salt.client
import yaml

local = salt.client.LocalClient()

def run():
    def claim_compute(hw_type):
        compute = hv_data['hypervisor']['hardware'][hw_type]
        compute.update({'free': compute.get('free') - vm_data.get(hw_type)})
        logging.error("virtUpdate reactor: claiming %s compute: %s " % (hw_type, compute))
    def claim_pci(hw_type):
        claimed_hw = hv_data['hypervisor']['hardware'][hw_type]['claimed']
        # if a list of devices was defined
        if isinstance(vm_data[hw_type], list):
            for hw in vm_data[hw_type]:
                c_hw = {hw: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw)}
                claimed_hw.update(c_hw)
                host_devices.append(c_hw[hw])
            #hv_data['hypervisor']['hardware'][hw_type].update({'claimed': claimed_hw})
        # if a number of devices was defined
        else:
            n = vm_data[hw_type]
            vm_data[hw_type] = []
            # grab the first n devices as defined for the node type
            claiming_hw = list(hv_data['hypervisor']['hardware'][hw_type]['free'].items())[:n]
            logging.error("virtUpdate reactor: claiming %s hardware: %s " % (hw_type, claiming_hw))
            # claiming_hw is a list of tuples containing (numerical_id, pci_id)
            # claiming_hw example: [(1, 'pci_0000_c4_00_0'), (2, 'pci_0000_c4_00_1')]
            for hw in claiming_hw:
                c_hw = {hw[0]: hv_data['hypervisor']['hardware'][hw_type]['free'].pop(hw[0])}
                claimed_hw.update(c_hw)
                vm_data[hw_type].append(hw[0])
                host_devices.append(hw[1])
        logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)
    vm_name = data['name']
    hv_name = 'jppvirt'
    host_devices = []
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls") as f:
        try:
            vm_data = yaml.safe_load(f)
            logging.error("virtUpdate reactor: vm_data %s " % vm_data)
        except yaml.YAMLError as exc:
            logging.error(exc)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls") as f:
        try:
            hv_data = yaml.safe_load(f)
            logging.error("virtUpdate reactor: hv_data: %s " % hv_data)
        except yaml.YAMLError as exc:
            logging.error(exc)
    local.cmd('jppvirt', 'virt.stop', ['name=' + vm_name])
    for hw_type in ['disks', 'copper', 'sfp']:
        claim_pci(hw_type)
    for hw_type in ['cpu', 'memory']:
        claim_compute(hw_type)
    logging.error("virtUpdate reactor: host_devices: %s " % host_devices)
    # update the claimed hardware for the hypervisor
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + hv_name + ".sls", 'w') as f:
        yaml.dump(hv_data, f, default_flow_style=False)
    # since the original hw request may have been a count of devices instead of specific pci ids,
    # update the vm_data file with the assigned pci ids that were claimed
    logging.error("virtUpdate reactor: new vm_data: %s " % vm_data)
    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + vm_name + ".sls", 'w') as f:
        yaml.dump(vm_data, f, default_flow_style=False)
    mem = vm_data['memory'] * 1024
    r = local.cmd('jppvirt', 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
    logging.error("virtUpdate reactor: virt.update: %s" % r)
    local.cmd('jppvirt', 'virt.start', ['name=' + vm_name])
    return {}
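
A worked example of the count-based branch of claim_pci above: a request for n devices takes the first n entries from the free pool, records the claimed numeric ids back into vm_data, and collects the pci ids that get passed to virt.update as host_devices. The ids are illustrative, taken from the hypervisor pillar in this commit:

# Worked example of the count-based claim (not part of the commit).
free = {1: 'pci_0000_c4_00_0', 2: 'pci_0000_c4_00_1', 3: 'pci_0000_c4_00_2'}
claimed, host_devices = {}, []
n = 2                                  # e.g. vm_data['copper'] given as a count
for num_id, pci_id in list(free.items())[:n]:
    claimed[num_id] = free.pop(num_id)
    host_devices.append(pci_id)
# claimed      -> {1: 'pci_0000_c4_00_0', 2: 'pci_0000_c4_00_1'}
# host_devices -> ['pci_0000_c4_00_0', 'pci_0000_c4_00_1']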


@@ -0,0 +1,66 @@
searchnode:
  provider: local-kvm
  base_domain: jppol9vm
  ip_source: qemu-agent
  ssh_username: jpatterson
  private_key: /home/jpatterson/.ssh/id_rsa
  sudo: True
  # /tmp is mounted noexec; run the deploy script through sh to work around it
  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
  script_args: -F -x python3 stable 3006.1
  # grains to add to the minion
  #grains:
  #  clones-are-awesome: true
  # override minion settings
  minion:
    master: jppvirt
    master_port: 4506
    startup_states: sls
    sls_list:
      - setHostname
sensor:
  provider: local-kvm
  base_domain: jppol9vm
  ip_source: qemu-agent
  ssh_username: jpatterson
  private_key: /home/jpatterson/.ssh/id_rsa
  sudo: True
  #preflight_cmds:
  #  - echo "do something"
  #  - hostname
  # /tmp is mounted noexec; run the deploy script through sh to work around it
  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
  script_args: -F -x python3 stable 3006.1
  # the destination directory will be created if it doesn't exist
  #file_map:
  #  /srv/salt/filemap.txt: /remote/path/to/use/custom/filemap.txt
  #inline_script:
  #  - echo "SLEEPING"
  #  - hostname
  # grains to add to the minion
  #grains:
  #  clones-are-awesome: true
  # override minion settings
  minion:
    master: jppvirt
    master_port: 4506
    startup_states: sls
    sls_list:
      - setHostname
core:
  provider: local-kvm
  base_domain: jppol9vm
  ip_source: qemu-agent
  ssh_username: jpatterson
  private_key: /home/jpatterson/.ssh/id_rsa
  sudo: True
  deploy_command: sh /tmp/.saltcloud-*/deploy.sh
  script_args: -F -x python3 stable 3006.1
  minion:
    master: jppvirt
    master_port: 4506
    startup_states: sls
    sls_list:
      - setHostname
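
These profiles reference the local-kvm provider defined in the next file; each clone inherits base_domain jppol9vm and points its minion at the jppvirt master, running the setHostname state on first start. A hedged sketch of launching a clone from one of these profiles via salt-cloud's Python API (the VM name is illustrative):

# Sketch only: salt-cloud CloudClient call; the VM name is illustrative.
import salt.cloud

client = salt.cloud.CloudClient('/etc/salt/cloud')
# clones the base domain and seeds /etc/salt/minion from the profile's
# minion settings above
ret = client.profile('searchnode', names=['searchnode1'])
print(ret)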


@@ -0,0 +1,11 @@
# Set up a provider with qemu+ssh protocol
#kvm-via-ssh:
#  driver: libvirt
#  url: qemu+ssh://jpatterson@jppvirt/system?socket=/var/run/libvirt/libvirt-sock
# Or connect to a local libvirt instance
local-kvm:
  driver: libvirt
  url: qemu:///system
  # workaround flag for XML validation errors while cloning
  validate_xml: no


@@ -47,6 +47,20 @@ salt_master_service:
    - file: engines_config
    - order: last

# we need to manage adding the following to the salt-master config if there are hypervisors
#reactor:
#  - 'salt/cloud/*/creating':
#  - 'salt/cloud/*/requesting':
#  - 'salt/cloud/*/deploying':
#    - /srv/salt/reactor/createEmptyPillar.sls
#  - 'setup/so-minion':
#    - /srv/salt/reactor/setup.sls
#  - 'salt/cloud/*/created':
#    - /srv/salt/reactor/virtUpdate.sls
#  - 'salt/cloud/*/destroyed':
#    - /srv/salt/reactor/virtReleaseHardware.sls
#    - /srv/salt/reactor/deleteKey.sls
{% else %}
{{sls}}_state_not_allowed:


@@ -0,0 +1,18 @@
MAINIP:
MNIC: eth0
NODE_DESCRIPTION: 'virt search'
ES_HEAP_SIZE:
PATCHSCHEDULENAME:
INTERFACE: bond0
NODETYPE: SEARCHNODE
CORECOUNT: 16
LSHOSTNAME:
LSHEAP:
CPUCORES: 16
IDH_MGTRESTRICT:
IDH_SERVICES:
CPU: 16
MEMORY: 32
DISKS: 1
COPPER: 0
SFP: 0


@@ -0,0 +1,19 @@
MAINIP:
MNIC: eth0
NODE_DESCRIPTION: 'virt sensor'
ES_HEAP_SIZE:
PATCHSCHEDULENAME:
INTERFACE: bond0
NODETYPE: SENSOR
CORECOUNT: 4
LSHOSTNAME:
LSHEAP:
CPUCORES: 4
IDH_MGTRESTRICT:
IDH_SERVICES:
CPU: 16
MEMORY: 16
DISKS: 1
COPPER:
- 1
SFP: 2


@@ -0,0 +1,61 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'soinstall.map.jinja' import DATA %}

setHostname_{{grains.id.split("_") | first}}:
  network.system:
    - name: {{grains.id.split("_") | first}}
    - enabled: True
    - hostname: {{grains.id.split("_") | first}}
    - apply_hostname: True

create_pillar:
  event.send:
    - name: setup/so-minion
    - data:
        MAINIP: {{ DATA.MAINIP }}
        MNIC: {{ DATA.MNIC }}
        NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
        ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
        PATCHSCHEDULENAME: {{ DATA.PATCHSCHEDULENAME }}
        INTERFACE: {{ DATA.INTERFACE }}
        NODETYPE: {{ DATA.NODETYPE }}
        CORECOUNT: {{ DATA.CORECOUNT }}
        LSHOSTNAME: {{ DATA.LSHOSTNAME }}
        LSHEAP: {{ DATA.LSHEAP }}
        CPUCORES: {{ DATA.CPUCORES }}
        IDH_MGTRESTRICT: {{ DATA.IDH_MGTRESTRICT }}
        IDH_SERVICES: {{ DATA.IDH_SERVICES }}
        CPU: {{ DATA.CPU }}
        MEMORY: {{ DATA.MEMORY }}
        DISKS: {{ DATA.DISKS }}
        COPPER: {{ DATA.COPPER }}
        SFP: {{ DATA.SFP }}

# set event for firewall rules - so-firewall-minion
clean_sls_list:
  file.line:
    - name: /etc/salt/minion
    - match: 'sls_list:'
    - mode: delete

clean_setHostname:
  file.line:
    - name: /etc/salt/minion
    - match: '- setHostname'
    - mode: delete
    - onchanges:
      - file: clean_sls_list

set_highstate:
  file.replace:
    - name: /etc/salt/minion
    - pattern: 'startup_states: sls'
    - repl: 'startup_states: highstate'
    - onchanges:
      - file: clean_setHostname


@@ -0,0 +1,34 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% set nodetype = grains.id.split("_") | last %}
{% import_yaml nodetype ~ '.yaml' as DATA %}
{% set total_mem = grains.mem_total %}
{% do DATA.update({'MAINIP': grains.ip_interfaces.get(DATA.MNIC)[0]}) %}
{% do DATA.update({'CORECOUNT': grains.num_cpus}) %}
{% do DATA.update({'CPUCORES': grains.num_cpus}) %}
{% if nodetype == "searchnode" %}
{% do DATA.update({'LSHOSTNAME': grains.host}) %}
{# this replicates the function es_heapsize in so-functions #}
{% if total_mem < 8000 %}
{% set ES_HEAP_SIZE = "600m" %}
{% elif total_mem >= 100000 %}
{% set ES_HEAP_SIZE = "25000m" %}
{% else %}
{% set ES_HEAP_SIZE = total_mem // 3 %}
{% if ES_HEAP_SIZE > 25000 %}
{% set ES_HEAP_SIZE = "25000m" %}
{% else %}
{% set ES_HEAP_SIZE = ES_HEAP_SIZE ~ "m" %}
{% endif %}
{% endif %}
{% do DATA.update({'ES_HEAP_SIZE': ES_HEAP_SIZE}) %}
{% endif %}
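
The heap sizing above mirrors es_heapsize from so-functions: under 8000 MB of RAM pins the heap to 600m, 100000 MB or more pins it to 25000m, and everything in between takes a third of RAM, capped at 25000m. A Python rendering of the same branches as a sketch, with a worked example (32000 MB of RAM yields 10666m):

# Python rendering (sketch) of the ES_HEAP_SIZE branches above.
def es_heap_size(total_mem_mb):
    if total_mem_mb < 8000:
        return '600m'
    if total_mem_mb >= 100000:
        return '25000m'
    heap = total_mem_mb // 3           # integer MB, as in so-functions
    return '25000m' if heap > 25000 else '%dm' % heap

assert es_heap_size(32000) == '10666m'
assert es_heap_size(4000) == '600m'
assert es_heap_size(256000) == '25000m'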