Getting Close

This commit is contained in:
Mike Reeves
2018-10-11 19:38:25 -04:00
parent 7982f8ffff
commit 9ee87a92e9
9 changed files with 52 additions and 3 deletions

15
pillar/data/addtotab.sh Normal file
View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# This script adds sensors/nodes/etc to the nodes tab.
# Usage: addtotab.sh <type> <name> <ipaddress>
#   <type>      selects the pillar file: /opt/so/saltstack/pillar/data/<type>.sls
#   <name>      minion hostname to record under that tab
#   <ipaddress> the minion's main IP address
TYPE=$1
NAME=$2
IPADDRESS=$3
SLSFILE="/opt/so/saltstack/pillar/data/$TYPE.sls"
# -F: match the IP literally (dots are regex metacharacters).
# -w: require word boundaries so e.g. 10.0.0.1 does not match 10.0.0.10.
if grep -qFw "$IPADDRESS" "$SLSFILE"; then
  echo "$NAME already in the $TYPE tab"
else
  echo " $NAME:" >> "$SLSFILE"
  echo " - $IPADDRESS" >> "$SLSFILE"
fi

3
pillar/data/nodestab.sls Normal file
View File

@@ -0,0 +1,3 @@
nodestab:

View File

@@ -10,6 +10,7 @@ base:
- masters.{{ grains.host }}
- static
- firewall.*
- data
'G@role:so-node':
- nodes.schedule

View File

@@ -74,3 +74,11 @@ so-kibana:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- port_bindings:
- 0.0.0.0:5601:5601
# Keep the setting correct
KibanaHappy:
cmd.script:
shell: /bin/bash
runas: socore
source: salt://kibana/bin/keepkibanahappy.sh
template: jinja

View File

@@ -1,9 +1,8 @@
{%- if salt['grains.get']('role') == 'so-master' %}
{%- set master = '172.17.0.6' -%}
{%- set master = salt['pillar.get']('master:mainip', '') -%}
{%- set nodetype = 'master' %}
{%- else %}
{%- set nodetype = salt['pillar.get']('node:node_type', 'storage') %}
{%- set master = grains['master'] %}
{%- endif %}
output {
redis {

View File

@@ -24,6 +24,8 @@ base:
- redis
- elasticsearch
- logstash
- kibana
- utility
# Storage node logic

View File

@@ -1,4 +1,5 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set MASTER = grains['master'] %}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0
@@ -23,5 +24,12 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
exit
fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
# Add all the storage nodes to cross cluster searching.
# Iterate over the nodestab pillar (hostname -> main IP) written by addtotab.sh.
# NOTE: the original line ended the for tag with '%}}' — the extra '}' would be
# rendered literally into the generated shell script; it is removed here.
{%- for SN, SNIP in salt['pillar.get']('nodestab', {}).iteritems() %}
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNIP }}:9200"]}}}}}'
{%- endfor %}

10
salt/utility/init.sls Normal file
View File

@@ -0,0 +1,10 @@
# This state runs utility maintenance scripts; currently it applies the cross cluster search config on the master.
{% if grains['role'] == 'so-master' %}
# Make sure Cross Cluster is good. Will need some logic once we have hot/warm
crossclusterson:
cmd.script:
shell: /bin/bash
runas: socore
source: salt://utility/bin/crossthestreams.sh
template: jinja

View File

@@ -577,6 +577,7 @@ salt_master_directories() {
# Copy over the salt code and templates
cp -R pillar/* /opt/so/saltstack/pillar/
chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh
chmod +x /opt/so/saltstack/pillar/data/addtotab.sh
cp -R salt/* /opt/so/saltstack/salt/
}
@@ -639,6 +640,7 @@ set_initial_firewall_policy() {
if [ $INSTALLTYPE == 'STORAGENODE' ]; then
ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $HOSTNAME $MAINIP
fi
if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
@@ -694,7 +696,8 @@ update_sudoers() {
# Update Sudoers so that socore can accept keys without a password
echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | sudo tee -a /etc/sudoers
echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | sudo tee -a /etc/sudoers
echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | sudo tee -a /etc/sudoers
}
###########################################