salt3006.9, redo reactors, use virt.shutdown

m0duspwnens
2024-08-27 09:25:40 -04:00
parent d110503639
commit 21c3835322
13 changed files with 159 additions and 83 deletions
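The new reactor files below all follow the same #!py pattern: Salt renders the SLS as Python, injects the triggering event payload as `data`, and run() uses a LocalClient to apply a setup state on the minion named in the event. A minimal sketch of that pattern, purely illustrative and not part of the commit; the state name setup.virt.example is a placeholder:

#!py
# Illustrative reactor sketch only; it mirrors the structure of the reactors
# added in this commit. Salt's reactor system injects the event payload as `data`.
import logging
import salt.client

local = salt.client.LocalClient()

def run():
    vm_name = data['name']  # minion/VM name carried by the triggering event
    ret = local.cmd(vm_name, 'state.apply', ['setup.virt.example'])  # placeholder state
    logging.error("example reactor: %s returned: %s" % (vm_name, ret))
    return {}  # an empty dict means the reactor schedules no further actions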


@@ -0,0 +1,22 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import salt.client
local = salt.client.LocalClient()
def run():
    vm_name = data['name']
    logging.error("setHostname reactor: start for: %s " % vm_name)
    r = local.cmd(vm_name, 'state.apply', ['setup.virt.setHostname'])
    logging.error("setHostname reactor: return for %s: %s " % (vm_name,r))
    logging.error("setHostname reactor: end for: %s " % vm_name)
    return {}

salt/reactor/setSalt.sls

@@ -0,0 +1,22 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import salt.client
local = salt.client.LocalClient()
def run():
    vm_name = data['name']
    logging.error("setSalt reactor: start for: %s " % vm_name)
    r = local.cmd(vm_name, 'state.apply', ['setup.virt.setSalt'])
    logging.error("setSalt reactor: return for: %s: %s " % (vm_name,r))
    logging.error("setSalt reactor: end for: %s " % vm_name)
    return {}

salt/reactor/sominion.sls

@@ -0,0 +1,21 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import salt.client
local = salt.client.LocalClient()
def run():
    vm_name = data['name']
    logging.error("sominion reactor: start for: %s " % vm_name)
    r = local.cmd(vm_name, 'state.apply', ['setup.virt.sominion'])
    logging.error("sominion reactor: end for: %s " % vm_name)
    return {}


@@ -13,7 +13,7 @@ def run():
     minionid = data['id']
     DATA = data['data']
     hv_name = DATA['HYPERVISOR_HOST']
-    logging.error("setup reactor: %s " % DATA)
+    logging.error("sominion_setup reactor: %s " % DATA)
     vm_out_data = {
         'cpu': DATA['CPU'],
@@ -23,13 +23,13 @@ def run():
         'sfp': DATA['SFP']
     }
-    logging.error("setup reactor: vm_out_data: %s " % vm_out_data)
+    logging.error("sominion_setup reactor: vm_out_data: %s " % vm_out_data)
     with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + minionid + ".sls", 'w') as f:
         yaml.dump(vm_out_data, f, default_flow_style=False)
     rc = call("NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVirt -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -a=" + DATA['INTERFACE'] + " -c=" + str(DATA['CORECOUNT']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'", shell=True)
-    logging.error('setup_reactor: rc: %s' % rc)
+    logging.error('sominion_setup reactor: rc: %s' % rc)
     return {}


@@ -9,6 +9,7 @@ import logging
 import salt.client
 local = salt.client.LocalClient()
 import yaml
+from time import sleep

 def run():
@@ -17,7 +18,6 @@ def run():
         compute.update({'free': compute.get('free') - vm_data.get(hw_type)})
         logging.error("virtUpdate reactor: claiming %s compute: %s " % (hw_type,compute))
     def claim_pci(hw_type):
         claimed_hw = hv_data['hypervisor']['hardware'][hw_type]['claimed']
         # if a list of devices was defined
@@ -46,8 +46,10 @@ def run():
             host_devices.append(hw[1])
         logging.error("virtUpdate reactor: claimed_hw: %s " % claimed_hw)
-    vm_name = data['name']
-    hv_name = local.cmd(vm_name, 'grains.get', ['hypervisor_host'])
+    vm_name = data['id']
+    logging.error("virtUpdate reactor: vm_name: %s " % vm_name)
+    hv_name = local.cmd(vm_name, 'grains.get', ['hypervisor_host']).get(vm_name)
+    logging.error("virtUpdate reactor: hv_name: %s " % hv_name)
     host_devices = []
@@ -67,7 +69,20 @@ def run():
         except yaml.YAMLError as exc:
             logging.error(exc)
-    local.cmd(hv_name, 'virt.stop', ['name=' + vm_name])
+    r = local.cmd(hv_name, 'virt.shutdown', ['vm_=' + vm_name])
+    logging.error("virtUpdate reactor: virt.shutdown: %s return: %s " % (vm_name,r))
+    c = 0
+    while True:
+        if c == 60:
+            logging.error("virtUpdate reactor: vm_name: %s failed to shutdown in time " % vm_name)
+            return {}
+        r = local.cmd(hv_name, 'virt.list_inactive_vms')
+        logging.error("virtUpdate reactor: virt.list_inactive_vms: %s " % r.get(hv_name))
+        if vm_name in r.get(hv_name):
+            break
+        c += 1
+        sleep(1)
     for hw_type in ['disks', 'copper', 'sfp']:
         claim_pci(hw_type)
@@ -90,8 +105,9 @@ def run():
     mem = vm_data['memory'] * 1024
     r = local.cmd(hv_name, 'virt.update', ['name=' + vm_name, 'mem=' + str(mem), 'cpu=' + str(vm_data['cpu']), 'host_devices=' + str(host_devices)])
-    logging.error("virtUpdate reactor: virt.update: %s" % r)
-    local.cmd(hv_name, 'virt.start', ['name=' + vm_name])
+    logging.error("virtUpdate reactor: virt.update: vm_name: %s return: %s" % (vm_name,r))
+    r = local.cmd(hv_name, 'virt.start', ['name=' + vm_name])
+    logging.error("virtUpdate reactor: virt.start: vm_name: %s return: %s" % (vm_name,r))
     return {}
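The main behavioral change in this reactor is that the VM is now shut down gracefully with virt.shutdown, and the reactor polls virt.list_inactive_vms for up to 60 seconds before running virt.update and virt.start. A standalone sketch of that wait, factored into a helper; the function name and timeout parameter are illustrative, not part of the commit:

import logging
from time import sleep

def wait_for_shutdown(local, hv_name, vm_name, timeout=60):
    # Illustrative only: same polling idea as the inline loop above, wrapped as a
    # reusable helper. Returns True once the hypervisor reports the VM as inactive,
    # False if it never shuts down within `timeout` seconds.
    for _ in range(timeout):
        inactive = local.cmd(hv_name, 'virt.list_inactive_vms').get(hv_name, [])
        if vm_name in inactive:
            return True
        sleep(1)
    logging.error("%s did not shut down within %s seconds" % (vm_name, timeout))
    return False

With a helper like this, the reactor body would reduce to virt.shutdown, the wait, then virt.update and virt.start.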


@@ -14,14 +14,14 @@ core-{{host}}:
   private_key: /home/soqemussh/.ssh/id_ed25519
   sudo: True
   deploy_command: sh /tmp/.saltcloud-*/deploy.sh
-  script_args: -F -x python3 stable 3006.1
+  script_args: -F -x python3 stable 3006.9
   minion:
     master: {{ grains.host }}
     master_port: 4506
-    startup_states: sls
-    sls_list:
-      - setup.virt.setHostname
-      - salt.minion
+    #startup_states: sls
+    #sls_list:
+    # - setup.virt.setSalt
+    # - setup.virt.setHostname
     use_superseded:
       - module.run
     features:
@@ -37,6 +37,7 @@ core-{{host}}:
     #file_map:
     # /opt/so/saltstack/default/salt/repo/client/files/oracle/keys/securityonion.pub: /tmp/securityonion.pub
     #inline_script:
+    # - "systemctl start salt-minion"
     # - "rpm --import /tmp/securityonion.pub"
     # grains to add to the minion


@@ -1,4 +1,4 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   master:
-    version: 3006.1
+    version: 3006.9
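The comment above exists because soup (Security Onion's updater) greps this pillar for the version value to decide whether Salt needs patching, so the key must not appear anywhere else in the file. A rough illustration of that kind of single-match lookup, written in Python rather than soup's actual shell logic; the function name is hypothetical and the path is passed in rather than assumed:

import re

def pillar_salt_version(path):
    # Illustrative only: a grep-style lookup breaks if 'version:' occurs more
    # than once in the pillar, which is exactly what the comment warns about.
    with open(path) as f:
        matches = re.findall(r'^\s*version:\s*(\S+)', f.read(), re.MULTILINE)
    if len(matches) != 1:
        raise ValueError("expected exactly one 'version:' entry, found %d" % len(matches))
    return matches[0]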


@@ -58,14 +58,18 @@ salt_master_service:
 #- salt/cloud/*/requesting
 # - 'salt/cloud/*/deploying':
 #   - /opt/so/saltstack/default/salt/reactor/createEmptyPillar.sls
-# - 'setup/so-minion':
-#   - /opt/so/saltstack/default/salt/reactor/setup.sls
 # - 'salt/cloud/*/created':
+#   - /opt/so/saltstack/default/salt/reactor/setSalt.sls
+#   - /opt/so/saltstack/default/salt/reactor/setHostname.sls
+#   - /opt/so/saltstack/default/salt/reactor/sominion.sls
+# - 'setup/so-minion':
+#   - /opt/so/saltstack/default/salt/reactor/sominion_setup.sls
 #   - /opt/so/saltstack/default/salt/reactor/virtUpdate.sls
 # - 'salt/cloud/*/destroyed':
 #   - /opt/so/saltstack/default/salt/reactor/virtReleaseHardware.sls
 #   - /opt/so/saltstack/default/salt/reactor/deleteKey.sls
 {% else %}
 {{sls}}_state_not_allowed:


@@ -1,6 +1,6 @@
 # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
 salt:
   minion:
-    version: 3006.1
+    version: 3006.9
     check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
     service_start_delay: 30 # in seconds.
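check_threshold is the staleness limit used by so-salt-minion-check: a scheduled job touches a heartbeat file every 5 to 8 minutes, and the check restarts salt-minion when that file is older than the threshold, which is why values under 600 seconds cause spurious restarts. A hypothetical sketch of that comparison; the function and the touch-file argument are placeholders, not the actual so-salt-minion-check implementation:

import os
import time

def minion_heartbeat_is_stale(touch_file, threshold=3600):
    # Illustrative only: with the heartbeat touched every 5-8 minutes, any
    # threshold under roughly 600 seconds would report a healthy minion as stale.
    age_seconds = time.time() - os.path.getmtime(touch_file)
    return age_seconds > threshold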


@@ -11,9 +11,8 @@ LSHEAP:
 CPUCORES: 4
 IDH_MGTRESTRICT:
 IDH_SERVICES:
-CPU: 16
-MEMORY: 16
-DISKS: 1
-COPPER:
-  - 1
-SFP: 2
+CPU: 8
+MEMORY: 8
+DISKS: 0
+COPPER: 0
+SFP: 0


@@ -3,8 +3,6 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
-{% from 'setup/virt/soinstall.map.jinja' import DATA %}
 setHostname_{{grains.id.split("_") | first}}:
   cmd.run:
     - name: hostnamectl set-hostname --static {{grains.id.split("_") | first}}
@@ -13,56 +11,3 @@ setHostname_{{grains.id.split("_") | first}}:
     - enabled: True
     - hostname: {{grains.id.split("_") | first}}
     - apply_hostname: True
-set_role_grain:
-  grains.present:
-    - name: role
-    - value: so-{{ grains.id.split("_") | last }}
-# set event for firewall rules - so-firewall-minion
-clean_sls_list:
-  file.line:
-    - name: /etc/salt/minion
-    - match: 'sls_list:'
-    - mode: delete
-clean_setHostname:
-  file.line:
-    - name: /etc/salt/minion
-    - match: '- setup.virt.setHostname'
-    - mode: delete
-    - onchanges:
-      - file: clean_sls_list
-set_highstate:
-  file.replace:
-    - name: /etc/salt/minion
-    - pattern: 'startup_states: sls'
-    - repl: 'startup_states: highstate'
-    - onchanges:
-      - file: clean_setHostname
-create_pillar:
-  event.send:
-    - name: setup/so-minion
-    - data:
-        HYPERVISOR_HOST: {{ grains.hypervisor_host }}
-        MAINIP: {{ DATA.MAINIP }}
-        MNIC: {{ DATA.MNIC }}
-        NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
-        ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
-        PATCHSCHEDULENAME: {{ DATA.PATCHSCHEDULENAME }}
-        INTERFACE: {{ DATA.INTERFACE }}
-        NODETYPE: {{ DATA.NODETYPE }}
-        CORECOUNT: {{ DATA.CORECOUNT }}
-        LSHOSTNAME: {{ DATA.LSHOSTNAME }}
-        LSHEAP: {{ DATA.LSHEAP }}
-        CPUCORES: {{ DATA.CPUCORES }}
-        IDH_MGTRESTRICT: {{ DATA.IDH_MGTRESTRICT }}
-        IDH_SERVICES: {{ DATA.IDH_SERVICES }}
-        CPU: {{ DATA.CPU }}
-        MEMORY: {{ DATA.MEMORY }}
-        DISKS: {{ DATA.DISKS }}
-        COPPER: {{ DATA.COPPER }}
-        SFP: {{ DATA.SFP }}


@@ -0,0 +1,16 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
set_role_grain:
  grains.present:
    - name: role
    - value: so-{{ grains.id.split("_") | last }}

# set event for firewall rules - so-firewall-minion
set_highstate:
  file.append:
    - name: /etc/salt/minion
    - text: 'startup_states: highstate'


@@ -0,0 +1,30 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'setup/virt/soinstall.map.jinja' import DATA %}
create_pillar:
  event.send:
    - name: setup/so-minion
    - data:
        HYPERVISOR_HOST: {{ grains.hypervisor_host }}
        MAINIP: {{ DATA.MAINIP }}
        MNIC: {{ DATA.MNIC }}
        NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
        ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
        PATCHSCHEDULENAME: {{ DATA.PATCHSCHEDULENAME }}
        INTERFACE: {{ DATA.INTERFACE }}
        NODETYPE: {{ DATA.NODETYPE }}
        CORECOUNT: {{ DATA.CORECOUNT }}
        LSHOSTNAME: {{ DATA.LSHOSTNAME }}
        LSHEAP: {{ DATA.LSHEAP }}
        CPUCORES: {{ DATA.CPUCORES }}
        IDH_MGTRESTRICT: {{ DATA.IDH_MGTRESTRICT }}
        IDH_SERVICES: {{ DATA.IDH_SERVICES }}
        CPU: {{ DATA.CPU }}
        MEMORY: {{ DATA.MEMORY }}
        DISKS: {{ DATA.DISKS }}
        COPPER: {{ DATA.COPPER }}
        SFP: {{ DATA.SFP }}