managerhype br0 setup

Josh Patterson
2025-09-25 16:06:25 -04:00
parent 5d1edf6d86
commit 1fb558cc77
3 changed files with 17 additions and 7 deletions

View File

@@ -4,7 +4,10 @@
 Elastic License 2.0. #}
 {% set role = salt['grains.get']('role', '') %}
-{% if role in ['so-hypervisor','so-managerhype'] %}
+{# We are using usebr0 mostly for setup of the so-managerhype node and controlling when we use br0 vs the physical interface #}
+{% set usebr0 = salt['pillar.get']('usebr0', True) %}
+{% if role in ['so-hypervisor','so-managerhype'] and usebr0 %}
 {% set interface = 'br0' %}
 {% else %}
 {% set interface = pillar.host.mainint %}
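A quick way to sanity check the new toggle, assuming this template is pulled in when the salt.mine_functions state (applied by the setup script below) is rendered on a node whose role grain is so-hypervisor or so-managerhype; the NIC name eno1 is a placeholder and this local rendering is not part of the commit:

# With the bridge disabled via pillar, 'interface' should fall back to the physical NIC.
salt-call --local --file-root=../salt/ state.show_sls salt.mine_functions -l info pillar='{"host": {"mainint": "eno1"}, "usebr0": false}'

# With the default (usebr0 unset, hence True), hypervisor roles should select br0.
salt-call --local --file-root=../salt/ state.show_sls salt.mine_functions -l info pillar='{"host": {"mainint": "eno1"}}'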

View File

@@ -541,8 +541,15 @@ configure_minion() {
 "log_file: /opt/so/log/salt/minion"\
 "#startup_states: highstate" >> "$minion_config"
-info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "$MNIC"}}'"
-salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="{'host': {'mainint': $MNIC}}"
+# At the time the so-managerhype node does not yet have the bridge configured.
+# The so-hypervisor node doesn't either, but it doesn't cause issues here.
+local usebr0=false
+if [ "$minion_type" == 'hypervisor' ]; then
+  usebr0=true
+fi
+local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
+info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
+salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"
 {
 logCmd "systemctl enable salt-minion";
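For clarity, a standalone sketch of what the escaped pillar string expands to at run time; it mirrors the quoting above but runs outside the function (so local is dropped), and all values are placeholders:

#!/bin/bash
# Reproduce the quoting from configure_minion() to show the literal JSON handed to salt-call.
MNIC="eno1"
minion_type="hypervisor"
usebr0=false
if [ "$minion_type" == 'hypervisor' ]; then
  usebr0=true
fi
pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
echo "$pillar_json"
# Prints: {"host": {"mainint": "eno1"}, "usebr0": true}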
@@ -1195,9 +1202,9 @@ hypervisor_local_states() {
 logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
 info "Setting up bridge for $MNIC"
 salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
-if [ $is_managerhype ]; then
-logCmd "salt-call state.apply salt.minion queue=True"
-fi
+#if [ $is_managerhype ]; then
+# logCmd "salt-call state.apply salt.minion queue=True"
+#fi
 fi
 }
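After the libvirt.bridge state has applied, bridge membership can be confirmed with standard iproute2 commands (not part of the commit):

# Show the bridge and any interfaces enslaved to it.
ip link show br0
ip link show master br0
# The main interface (e.g. eno1) should be listed with "master br0".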

View File

@@ -762,6 +762,7 @@ if ! [[ -f $install_opt_file ]]; then
 fi
 logCmd "salt-call state.apply common.packages"
 logCmd "salt-call state.apply common"
+hypervisor_local_states
 # this will apply the salt.minion state first since salt.master includes salt.minion
 logCmd "salt-call state.apply salt.master"
 # wait here until we get a response from the salt-master since it may have just restarted
@@ -826,7 +827,6 @@ if ! [[ -f $install_opt_file ]]; then
 checkin_at_boot
 set_initial_firewall_access
 logCmd "salt-call schedule.enable -linfo --local"
-hypervisor_local_states
 verify_setup
 else
 touch /root/accept_changes