mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Getting Close
pillar/data/addtotab.sh (new file, +15 lines)
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# This script adds sensors/nodes/etc to the nodes tab
+
+TYPE=$1
+NAME=$2
+IPADDRESS=$3
+
+if grep -q $3 "/opt/so/saltstack/pillar/data/$1.sls"; then
+  echo "Storage Node Already in There"
+else
+  echo "  $2:" >> /opt/so/saltstack/pillar/data/$1.sls
+  echo "    - $3" >> /opt/so/saltstack/pillar/data/$1.sls
+
+fi
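For context, a minimal usage sketch of the new script (hypothetical node name and IP; assumes the target pillar file already exists with a top-level nodestab: key, as created in the next file):

# Hypothetical example: register storage node "stnode01" at 10.1.2.3 on the master
sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab stnode01 10.1.2.3

# Expected contents of /opt/so/saltstack/pillar/data/nodestab.sls afterwards:
#   nodestab:
#     stnode01:
#       - 10.1.2.3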
pillar/data/nodestab.sls (new file, +3 lines)
@@ -0,0 +1,3 @@
+nodestab:
+
+
@@ -10,6 +10,7 @@ base:
     - masters.{{ grains.host }}
     - static
     - firewall.*
+    - data
 
   'G@role:so-node':
     - nodes.schedule
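Since the pillar top file now pulls in the data pillar (the new - data entry above), a quick sanity check on the master could look like this (a sketch, run as root):

# Refresh pillar data on all minions after the pillar files change
sudo salt '*' saltutil.refresh_pillar
# Confirm the nodestab pillar from pillar/data/nodestab.sls is visible
sudo salt-call pillar.get nodestab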
@@ -74,3 +74,11 @@ so-kibana:
     - /sys/fs/cgroup:/sys/fs/cgroup:ro
     - port_bindings:
       - 0.0.0.0:5601:5601
+
+# Keep the setting correct
+KibanaHappy:
+  cmd.script:
+    - shell: /bin/bash
+    - runas: socore
+    - source: salt://kibana/bin/keepkibanahappy.sh
+    - template: jinja
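To exercise just this new state without a full highstate, something like the following should work on the master (a sketch; the state ID and SLS name are taken from the diff above):

# Run only the KibanaHappy state ID from the kibana SLS
sudo salt-call state.sls_id KibanaHappy kibana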
@@ -1,9 +1,8 @@
 {%- if salt['grains.get']('role') == 'so-master' %}
-{%- set master = '172.17.0.6' -%}
+{%- set master = salt['pillar.get']('master:mainip', '') -%}
 {%- set nodetype = 'master' %}
 {%- else %}
 {%- set nodetype = salt['pillar.get']('node:node_type', 'storage') %}
-{%- set master = grains['master'] %}
 {%- endif %}
 output {
   redis {
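With the hardcoded 172.17.0.6 replaced by a pillar lookup, it is worth checking that master:mainip is actually populated before the template renders, since the lookup falls back to an empty string (a sketch):

# Should print the master's main IP; an empty result means the redis output would get no host
sudo salt-call pillar.get master:mainip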
@@ -24,6 +24,8 @@ base:
     - redis
     - elasticsearch
     - logstash
+    - kibana
+    - utility
 
   # Storage node logic
 
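With kibana and utility added to the state top file, the master's state list can be checked before running a highstate (a sketch):

# List the states the master minion would apply; kibana and utility should now appear
sudo salt-call state.show_top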
@@ -1,4 +1,5 @@
 {%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set MASTER = grains['master'] %}
 # Wait for ElasticSearch to come up, so that we can query for version infromation
 echo -n "Waiting for ElasticSearch..."
 COUNT=0
@@ -23,5 +24,12 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
   exit
 fi
 
+echo "Applying cross cluster search config..."
+curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
+  -H 'Content-Type: application/json' \
+  -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
+
 # Add all the storage nodes to cross cluster searching.
+{%- for SN, SNIP in salt['pillar.get']('nodestab', {}).iteritems() %}
 curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNIP }}:9200"]}}}}}'
+{%- endfor %}
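Once the script has run, the cross cluster search settings it pushes can be verified directly against ElasticSearch on the master (a sketch; replace <master-ip> with the master's main IP):

# Show the persistent remote-cluster seeds registered above
curl -s "http://<master-ip>:9200/_cluster/settings?pretty"
# Show connection status for each configured remote cluster
curl -s "http://<master-ip>:9200/_remote/info?pretty"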
salt/utility/init.sls (new file, +10 lines)
@@ -0,0 +1,10 @@
+# This state is for checking things
+{% if grains['role'] == 'so-master' %}
+
+# Make sure Cross Cluster is good. Will need some logic once we have hot/warm
+crossclusterson:
+  cmd.script:
+    - shell: /bin/bash
+    - runas: socore
+    - source: salt://utility/bin/crossthestreams.sh
+    - template: jinja
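The new utility state only runs on the so-master role; a targeted test run might look like this (a sketch):

# Apply only the utility state on the master to trigger crossclusterson
sudo salt-call state.apply utility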
@@ -577,6 +577,7 @@ salt_master_directories() {
   # Copy over the salt code and templates
   cp -R pillar/* /opt/so/saltstack/pillar/
   chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh
+  chmod +x /opt/so/saltstack/pillar/data/addtotab.sh
   cp -R salt/* /opt/so/saltstack/salt/
 
 }
@@ -639,6 +640,7 @@ set_initial_firewall_policy() {
   if [ $INSTALLTYPE == 'STORAGENODE' ]; then
     ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
     ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
+    ssh -i ~/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $HOSTNAME $MAINIP
   fi
 
   if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
@@ -694,6 +696,7 @@ update_sudoers() {
   # Update Sudoers so that socore can accept keys without a password
   echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | sudo tee -a /etc/sudoers
   echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | sudo tee -a /etc/sudoers
+  echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | sudo tee -a /etc/sudoers
 
 }
 
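To confirm the passwordless sudo entry works before a remote node invokes it over SSH, the allowed commands for socore can be listed on the master (a sketch):

# addtotab.sh should now show up among socore's NOPASSWD commands
sudo -l -U socore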