diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion
index 34e069ece..79eea59fe 100755
--- a/salt/manager/tools/sbin/so-minion
+++ b/salt/manager/tools/sbin/so-minion
@@ -426,10 +426,6 @@ function checkMine() {
 }
 
-function updateMine() {
-  retry 20 1 "salt '$MINION_ID' mine.update" True
-}
-
 function createEVAL() {
   is_pcaplimit=true
   pcapspace
 
@@ -604,20 +600,12 @@ function addMinion() {
 }
 
 function updateMineAndApplyStates() {
-  # tell the minion to populate the mine with data from mine_functions which is populated during setup
-  # this only needs to happen on non-managers since they handle this during setup
-  # and they need to wait for ca creation to update the mine
-  updateMine
-  checkMine "network.ip_addrs"
-  # apply the elasticsearch state to the manager if a new searchnode was added
-  if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
-    # calls so-common and set_minionid sets MINIONID to local minion id
-    set_minionid
-    salt $MINIONID state.apply elasticsearch queue=True --async
-    salt $MINIONID state.apply soc queue=True --async
-  fi
-  # run this async so the cli doesn't wait for a return
-  salt "$MINION_ID" state.highstate --async queue=True
+
+  #checkMine "network.ip_addrs"
+  # calls so-common and set_minionid sets MINIONID to local minion id
+  set_minionid
+  # $MINIONID is the manager's minion id; $MINION_ID is the minion id of the node being configured
+  salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 &
 }
 
 function setupMinionFiles() {
diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls
new file mode 100644
index 000000000..c05a812a3
--- /dev/null
+++ b/salt/orch/deploy_newnode.sls
@@ -0,0 +1,32 @@
+{% set MANAGER = salt['pillar.get']('setup:manager') %}
+{% set NEWNODE = salt['pillar.get']('setup:newnode') %}
+
+# tell the minion to populate the mine with data from mine_functions which is populated during setup
+# this only needs to happen on non-managers since they handle this during setup
+# and they need to wait for ca creation to update the mine
+{{NEWNODE}}_update_mine:
+  salt.function:
+    - name: mine.update
+    - tgt: {{ NEWNODE }}
+    - retry:
+        attempts: 36
+        interval: 5
+
+# we need to prepare the manager for a new searchnode or heavynode
+{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %}
+manager_run_es_soc:
+  salt.state:
+    - tgt: {{ MANAGER }}
+    - sls:
+      - elasticsearch
+      - soc
+    - queue: True
+    - require:
+      - salt: {{NEWNODE}}_update_mine
+{% endif %}
+
+{{NEWNODE}}_run_highstate:
+  salt.state:
+    - tgt: {{ NEWNODE }}
+    - highstate: True
+    - queue: True
diff --git a/setup/so-setup b/setup/so-setup
index 1c3be22bf..8a1879c58 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -808,7 +808,6 @@ if ! [[ -f $install_opt_file ]]; then
   configure_minion "$minion_type"
   check_sos_appliance
   drop_install_options
-  logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/"
   verify_setup
 fi
 
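
Note: the new orchestration can also be exercised by hand from the manager for verification. This is a sketch; the minion ids 'manager1_manager' and 'node1_searchnode' are hypothetical examples of the <hostname>_<role> naming that the NEWNODE.split('_')|last check in deploy_newnode.sls relies on:

    # run as root on the manager; mirrors the call made by updateMineAndApplyStates,
    # minus the backgrounding and output redirection so progress is visible
    salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': 'manager1_manager', 'newnode': 'node1_searchnode'}}"

Because 'node1_searchnode' ends in 'searchnode', this run would also trigger the manager_run_es_soc step (elasticsearch and soc states on the manager) after the mine update and before the new node's highstate.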