Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

Commit: properly set memory and CPUCORES for minion pillars during vm setup
@@ -10,26 +10,32 @@ from subprocess import call
import yaml


def run():
    logging.debug('sominion_setup_reactor: Running')
    minionid = data['id']
    DATA = data['data']
    hv_name = DATA['HYPERVISOR_HOST']
    logging.error("sominion_setup reactor: %s " % DATA)
    logging.debug('sominion_setup_reactor: DATA: %s' % DATA)

    vm_out_data = {
        'cpu': DATA['CPU'],
        'memory': DATA['MEMORY'],
        'disks': DATA['DISKS'],
        'copper': DATA['COPPER'],
        'sfp': DATA['SFP']
    }

    logging.error("sominion_setup reactor: vm_out_data: %s " % vm_out_data)
    # Build the base command
    cmd = "NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -d='" + DATA['NODE_DESCRIPTION'] + "'"

    # Add optional arguments only if they exist in DATA
    if 'CORECOUNT' in DATA:
        cmd += " -c=" + str(DATA['CORECOUNT'])

    if 'INTERFACE' in DATA:
        cmd += " -a=" + DATA['INTERFACE']

    if 'ES_HEAP_SIZE' in DATA:
        cmd += " -e=" + DATA['ES_HEAP_SIZE']

    if 'LS_HEAP_SIZE' in DATA:
        cmd += " -l=" + DATA['LS_HEAP_SIZE']

    logging.debug('sominion_setup_reactor: Command: %s' % cmd)
    rc = call(cmd, shell=True)

    with open("/opt/so/saltstack/local/pillar/hypervisor/" + hv_name + "/" + minionid + ".sls", 'w') as f:
        yaml.dump(vm_out_data, f, default_flow_style=False)

    rc = call("NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -a=" + DATA['INTERFACE'] + " -c=" + str(DATA['CPU']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'" + " -e=" + DATA['ES_HEAP_SIZE'] + " -l=" + DATA['LS_HEAP_SIZE'], shell=True)

    logging.error('sominion_setup reactor: rc: %s' % rc)
    logging.info('sominion_setup_reactor: rc: %s' % rc)

    return {}
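To make the reactor change above easier to follow outside of Salt, here is a minimal sketch of the same two ideas: append the optional so-minion flags only when the corresponding keys exist in the event data, and serialize the VM hardware details with yaml.dump. The minion ID and DATA values below are hypothetical samples, and the sketch prints instead of calling so-minion or writing under /opt/so. Building the command incrementally is what lets the reactor handle node types that do not send CORECOUNT or the heap sizes.

# Minimal sketch of the reactor's command assembly; sample values are hypothetical.
import yaml

minionid = "sensor1_sensor"                      # hypothetical minion ID
DATA = {                                         # hypothetical event payload
    'NODETYPE': 'SENSOR', 'MNIC': 'enp1s0', 'MAINIP': '10.0.0.5',
    'NODE_DESCRIPTION': 'VM of hv01', 'CORECOUNT': 3, 'INTERFACE': 'bond0',
    'CPU': 8, 'MEMORY': 16, 'DISKS': ['vda'], 'COPPER': 2, 'SFP': 0,
}

# Required arguments are always present.
cmd = ("NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM"
       " -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] +
       " -d='" + DATA['NODE_DESCRIPTION'] + "'")

# Optional arguments are appended only when the key exists in DATA.
for key, flag in (('CORECOUNT', '-c'), ('INTERFACE', '-a'),
                  ('ES_HEAP_SIZE', '-e'), ('LS_HEAP_SIZE', '-l')):
    if key in DATA:
        cmd += " " + flag + "=" + str(DATA[key])

print(cmd)

# The hardware details the reactor persists for the hypervisor pillar.
vm_out_data = {k.lower(): DATA[k] for k in ('CPU', 'MEMORY', 'DISKS', 'COPPER', 'SFP')}
print(yaml.dump(vm_out_data, default_flow_style=False))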
@@ -4,18 +4,67 @@
Elastic License 2.0. #}

{% set nodetype = grains.id.split("_") | last %}
{% import_yaml 'setup/virt/' ~ nodetype ~ '.yaml' as DATA %}
{% set total_mem = grains.mem_total %}
{% set hypervisor = salt['grains.get']('salt-cloud:profile').split('-')[1] %}

{# Import hardware details from VM hardware tracking file #}
{% import_json 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ grains.id as vm_hardware %}

{% set DATA = {} %}
{% do DATA.update({'MNIC': 'enp1s0'}) %}
{% do DATA.update({'MAINIP': grains.ip_interfaces.get(DATA.MNIC)[0]}) %}
{% do DATA.update({'CORECOUNT': grains.num_cpus}) %}
{% do DATA.update({'CPUCORES': grains.num_cpus}) %}

{# Use CPU value from VM hardware file if available, otherwise fall back to grains #}
{% if vm_hardware and vm_hardware.get('config', {}).get('cpu') %}
{% do DATA.update({'CPUCORES': vm_hardware.get('config', {}).get('cpu')|int }) %}
{% do salt.log.info('Using CPU from VM hardware file: ' ~ vm_hardware.get('config', {}).get('cpu')|string) %}
{% else %}
{% do DATA.update({'CPUCORES': grains.num_cpus }) %}
{% do salt.log.error('Using CPU from grains: ' ~ grains.num_cpus|string) %}
{% endif %}

{# Use memory value from VM hardware file if available, otherwise fall back to grains. If grains is used, it will be the cpu/mem from the base domain. #}
{% if vm_hardware and vm_hardware.get('config', {}).get('memory') %}
{% set total_mem = vm_hardware.get('config', {}).get('memory')|int * 1024 %}
{% do salt.log.info('Using memory from VM hardware file: ' ~ vm_hardware.get('config', {}).get('memory')|string ~ ' (converted to ' ~ total_mem|string ~ ')') %}
{% else %}
{% set total_mem = grains.mem_total %}
{% do salt.log.error('Using memory from grains: ' ~ total_mem|string) %}
{% endif %}
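Restated outside of Jinja, the block above prefers the CPU and memory recorded in the per-VM hardware tracking file and only falls back to grains when the file or key is missing. The sketch below is a rough Python equivalent with hypothetical sample inputs; the * 1024 conversion mirrors the template and assumes the hardware file stores memory in GB while grains.mem_total is in MB.

# Rough Python restatement of the Jinja fallback; inputs are hypothetical samples.
def pick_cpu_and_mem(vm_hardware, grains_num_cpus, grains_mem_total):
    config = (vm_hardware or {}).get('config', {})

    # CPU: prefer the hardware file, otherwise the minion's grains.
    cpucores = int(config['cpu']) if config.get('cpu') else grains_num_cpus

    # Memory: the hardware file value is multiplied by 1024 so it is comparable
    # to grains.mem_total (assumed GB -> MB); otherwise use grains directly.
    if config.get('memory'):
        total_mem = int(config['memory']) * 1024
    else:
        total_mem = grains_mem_total

    return cpucores, total_mem

print(pick_cpu_and_mem({'config': {'cpu': 8, 'memory': 24}}, 4, 15872))   # (8, 24576)
print(pick_cpu_and_mem(None, 4, 15872))                                   # (4, 15872)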
{% do DATA.update({'NODE_DESCRIPTION': 'VM of ' ~ hypervisor}) %}
{% do DATA.update({'NODETYPE': nodetype | upper}) %}

{% if nodetype in ['standalone', 'sensor', 'heavynode'] %}
{% do DATA.update({'INTERFACE': 'bond0'}) %}
{% endif %}

{# Calculate reasonable core usage #}
{% set cores_for_zeek = (DATA.CPUCORES / 2) - 1 %}
{% do salt.log.info('cores_for_zeek calculation using CPUCORES: ' ~ DATA.CPUCORES|string) %}
{% do salt.log.info('cores_for_zeek: ' ~ cores_for_zeek|string) %}
{% set lb_procs_round = cores_for_zeek|round|int %}
{% do salt.log.info('lb_procs_round: ' ~ lb_procs_round|string) %}
{% set lb_procs = 1 if lb_procs_round < 1 else lb_procs_round %}
{% do salt.log.info('lb_procs: ' ~ lb_procs|string) %}
{# Check memory conditions #}
{% set low_mem = false %}
{% do salt.log.info('Memory check using total_mem: ' ~ total_mem|string) %}
{% if nodetype in ['standalone', 'heavynode'] %}
{% if total_mem > 15000 and total_mem < 24000 %}
{% set low_mem = true %}
{% endif %}
{% endif %}
{# Set CORECOUNT based on memory conditions #}
{% if low_mem %}
{% do DATA.update({'CORECOUNT': 1}) %}
{% else %}
{% do DATA.update({'CORECOUNT': lb_procs}) %}
{% endif %}

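The arithmetic above gives Zeek roughly half of the VM's cores minus one, never fewer than one worker, and pins CORECOUNT to 1 on standalone and heavynode VMs whose memory falls between 15000 and 24000 MB. A small sketch with worked examples (the function name is illustrative, not part of the template):

# Sketch of the CORECOUNT calculation; node types and thresholds come from the template above.
def corecount(cpucores, total_mem, nodetype):
    cores_for_zeek = (cpucores / 2) - 1
    lb_procs = max(1, round(cores_for_zeek))
    low_mem = nodetype in ('standalone', 'heavynode') and 15000 < total_mem < 24000
    return 1 if low_mem else lb_procs

print(corecount(8, 32768, 'sensor'))       # (8/2)-1 = 3 -> 3 workers
print(corecount(2, 32768, 'sensor'))       # (2/2)-1 = 0 -> clamped to 1
print(corecount(8, 16384, 'standalone'))   # low-memory standalone -> 1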
{% if nodetype in ['searchnode', 'receiver', 'fleet', 'heavynode'] %}

{# we can't use the host grain here because the grain may not be updated yet from the hostname change #}
{% do DATA.update({'LSHOSTNAME': grains.id.split("_") | first}) %}

{% if total_mem >= 32000 or nodetype in ['managersearch','heavynode','standalone'] %}
{% set LSHEAP="1000m" %}
{% elif nodetype == 'eval' %}
@@ -24,11 +73,8 @@
{% set LSHEAP="500m" %}
{% endif %}
{% do DATA.update({'LSHEAP': LSHEAP}) %}

{% endif %}

{% if nodetype in ['searchnode', 'heavynode'] %}

{# this replicates the function es_heapsize in so-functions #}
{% if total_mem < 8000 %}
{% set ES_HEAP_SIZE = "600m" %}
@@ -43,5 +89,4 @@
{% endif %}
{% endif %}
{% do DATA.update({'ES_HEAP_SIZE': ES_HEAP_SIZE}) %}

{% endif %}

@@ -13,18 +13,17 @@ create_pillar:
MAINIP: {{ DATA.MAINIP }}
MNIC: {{ DATA.MNIC }}
NODE_DESCRIPTION: '{{ DATA.NODE_DESCRIPTION }}'
ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
PATCHSCHEDULENAME: {{ DATA.PATCHSCHEDULENAME }}
INTERFACE: {{ DATA.INTERFACE }}
NODETYPE: {{ DATA.NODETYPE }}
{% if 'CORECOUNT' in DATA %}
CORECOUNT: {{ DATA.CORECOUNT }}
{% endif %}
{% if 'ES_HEAP_SIZE' in DATA %}
ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
{% endif %}
{% if 'LSHOSTNAME' in DATA %}
LSHOSTNAME: {{ DATA.LSHOSTNAME }}
{% endif %}
{% if 'LSHEAP' in DATA %}
LS_HEAP_SIZE: {{ DATA.LSHEAP }}
CPUCORES: {{ DATA.CPUCORES }}
IDH_MGTRESTRICT: {{ DATA.IDH_MGTRESTRICT }}
IDH_SERVICES: {{ DATA.IDH_SERVICES }}
CPU: {{ DATA.CPU }}
MEMORY: {{ DATA.MEMORY }}
DISKS: {{ DATA.DISKS }}
COPPER: {{ DATA.COPPER }}
SFP: {{ DATA.SFP }}
{% endif %}

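The 'KEY' in DATA guards in the pillar template above mean optional settings only appear in the rendered pillar when the setup actually computed them. The snippet below shows the effect on a trimmed-down template using plain jinja2 (assumed to be installed); Salt's renderer adds more, such as import_yaml, but the conditional-key behavior is analogous.

# Render a trimmed-down version of the conditional pillar keys with plain jinja2.
from jinja2 import Template

template = Template("""\
MAINIP: {{ DATA.MAINIP }}
NODETYPE: {{ DATA.NODETYPE }}
{% if 'CORECOUNT' in DATA %}CORECOUNT: {{ DATA.CORECOUNT }}
{% endif %}{% if 'ES_HEAP_SIZE' in DATA %}ES_HEAP_SIZE: {{ DATA.ES_HEAP_SIZE }}
{% endif %}""")

# A searchnode-style DATA dict renders the heap size; a sensor-style one omits it.
print(template.render(DATA={'MAINIP': '10.0.0.5', 'NODETYPE': 'SEARCHNODE',
                            'CORECOUNT': 3, 'ES_HEAP_SIZE': '4096m'}))
print(template.render(DATA={'MAINIP': '10.0.0.6', 'NODETYPE': 'SENSOR',
                            'CORECOUNT': 3}))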