Merge pull request #2402 from Security-Onion-Solutions/dev

2.3.20
Mike Reeves committed 2020-12-21 10:26:50 -05:00 (committed by GitHub)
93 changed files with 4046 additions and 2738 deletions

@@ -1,6 +1,6 @@
-## Security Onion 2.3.10
+## Security Onion 2.3.20
-Security Onion 2.3.10 is here!
+Security Onion 2.3.20 is here!
 ## Screenshots

@@ -1,16 +1,16 @@
-### 2.3.10 ISO image built on 2020/11/19
+### 2.3.20 ISO image built on 2020/12/20
 ### Download and Verify
-2.3.10 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.10.iso
-MD5: 55E10BAE3D90DF47CA4D5DCCDCB67A96
-SHA1: 01361123F35CEACE077803BC8074594D57EE653A
-SHA256: 772EA4EFFFF12F026593F5D1CC93DB538CC17B9BA5F60308F1976B6ED7032A8D
+2.3.20 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.20.iso
+MD5: E348FA65A46FD3FBA0D574D9C1A0582D
+SHA1: 4A6E6D4E0B31ECA1B72E642E3DB2C186B59009D6
+SHA256: 25DE77097903640771533FA13094D0720A032B70223875F8C77A92F5C44CA687
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.10.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.20.iso.sig
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.10.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.20.iso.sig
 ```
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.10.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.20.iso
 ```
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.10.iso.sig securityonion-2.3.10.iso
+gpg --verify securityonion-2.3.20.iso.sig securityonion-2.3.20.iso
 ```
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Thu 19 Nov 2020 03:38:54 PM EST using RSA key ID FE507013
+gpg: Signature made Sun 20 Dec 2020 11:11:28 AM EST using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
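As an additional check alongside the GPG signature, the downloaded ISO can be compared against the published SHA256 value (the hash literal below is the one listed above):
```
echo "25DE77097903640771533FA13094D0720A032B70223875F8C77A92F5C44CA687  securityonion-2.3.20.iso" | sha256sum -c -
```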

@@ -1 +1 @@
-2.3.10
+2.3.20

@@ -54,7 +54,8 @@ if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
     salt-call state.apply utility queue=True
   fi
 fi
-#if [ $TYPE == 'nodestab' ]; then
+if [ $TYPE == 'nodestab' ]; then
+  salt-call state.apply elasticsearch queue=True
 #  echo "  nodetype: $NODETYPE" >> $local_salt_dir/pillar/data/$TYPE.sls
 #  echo "  hotname: $HOTNAME" >> $local_salt_dir/pillar/data/$TYPE.sls
-#fi
+fi


@@ -1,65 +0,0 @@
firewall:
analyst:
ports:
tcp:
- 80
- 443
udp:
beats_endpoint:
ports:
tcp:
- 5044
forward_nodes:
ports:
tcp:
- 443
- 5044
- 5644
- 9822
udp:
manager:
ports:
tcp:
- 1514
- 3200
- 3306
- 4200
- 5601
- 6379
- 7788
- 8086
- 8090
- 9001
- 9200
- 9300
- 9400
- 9500
- 9595
- 9696
udp:
- 1514
minions:
ports:
tcp:
- 3142
- 4505
- 4506
- 5000
- 8080
- 8086
- 55000
osquery_endpoint:
ports:
tcp:
- 8090
search_nodes:
ports:
tcp:
- 6379
- 9300
wazuh_endpoint:
ports:
tcp:
- 1514
udp:
-1514

@@ -3,7 +3,7 @@ base:
     - patch.needs_restarting
     - logrotate
-  '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import':
+  '*_eval or *_helixsensor or *_heavynode or *_sensor or *_standalone or *_import':
     - match: compound
     - zeek
@@ -62,7 +62,7 @@ base:
     - global
     - minions.{{ grains.id }}
-  '*_helix':
+  '*_helixsensor':
    - fireeye
    - zeeklogs
    - logstash
@@ -82,6 +82,7 @@ base:
    - elasticsearch.search
    - global
    - minions.{{ grains.id }}
+    - data.nodestab
  '*_import':
    - zeeklogs

@@ -17,35 +17,48 @@ def mysql_conn(retry):
         log.error(e)
         return False
-    mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint'))
-    mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
+    mainint = __salt__['pillar.get']('host:mainint')
+    ip_arr = __salt__['grains.get']('ip4_interfaces').get(mainint)
     mysql_up = False
-    for i in range(0, retry):
-        log.debug(f'Connection attempt {i+1}')
-        try:
-            db = _mysql.connect(
-                host=mainip,
-                user='root',
-                passwd=__salt__['pillar.get']('secrets:mysql')
-            )
-            log.debug(f'Connected to MySQL server on {mainip} after {i} attempts.')
-            db.query("""SELECT 1;""")
-            log.debug(f'Successfully completed query against MySQL server on {mainip}')
-            db.close()
-            mysql_up = True
-            break
-        except _mysql.OperationalError as e:
-            log.debug(e)
-        except Exception as e:
-            log.error('Unexpected error occurred.')
-            log.error(e)
-            break
-        sleep(1)
-    if not mysql_up:
-        log.error(f'Could not connect to MySQL server on {mainip} after {retry} attempts.')
+    if len(ip_arr) == 1:
+        mainip = ip_arr[0]
+        if not(retry >= 1):
+            log.debug('`retry` set to value below 1, resetting it to 1 to prevent errors.')
+            retry = 1
+        for i in range(0, retry):
+            log.debug(f'Connection attempt {i+1}')
+            try:
+                db = _mysql.connect(
+                    host=mainip,
+                    user='root',
+                    passwd=__salt__['pillar.get']('secrets:mysql')
+                )
+                log.debug(f'Connected to MySQL server on {mainip} after {i+1} attempts.')
+                db.query("""SELECT 1;""")
+                log.debug(f'Successfully completed query against MySQL server on {mainip}')
+                db.close()
+                mysql_up = True
+                break
+            except _mysql.OperationalError as e:
+                log.debug(e)
+            except Exception as e:
+                log.error('Unexpected error occurred.')
+                log.error(e)
+                break
+            sleep(1)
+        if not mysql_up:
+            log.error(f'Could not connect to MySQL server on {mainip} after {retry} attempts.')
+    else:
+        log.error(f'Main interface {mainint} has more than one IP address assigned to it, which is not supported.')
+        log.debug(f'{mainint}:')
+        for addr in ip_arr:
+            log.debug(f'  - {addr}')
     return mysql_up


@@ -0,0 +1,12 @@
{%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
{%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
{
"registry-mirrors": [ "https://:5000" ],
"bip": "{{ DOCKERBIND }}",
"default-address-pools": [
{
"base" : "{{ DOCKERRANGE }}",
"size" : 24
}
]
}
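To preview how Salt renders this template for a given minion, the standard slsutil renderer can be used (a sketch; output depends on the minion's docker pillar values):
```
sudo salt-call slsutil.renderer salt://common/files/daemon.json default_renderer=jinja
```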

@@ -111,7 +111,7 @@ heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-2
-      - docker-ce: 5:19.03.12~3-0~ubuntu-bionic
+      - docker-ce: 5:19.03.14~3-0~ubuntu-bionic
     - hold: True
     - update_holds: True
@@ -147,7 +147,7 @@ heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-3.2.el7
-      - docker-ce: 3:19.03.12-3.el7
+      - docker-ce: 3:19.03.14-3.el7
     - hold: True
     - update_holds: True
 {% endif %}
@@ -244,10 +244,19 @@ commonlogrotateconf:
     - dayweek: '*'
 {% endif %}
+# Manager daemon.json
+docker_daemon:
+  file.managed:
+    - source: salt://common/files/daemon.json
+    - name: /etc/docker/daemon.json
+    - template: jinja
+
 # Make sure Docker is always running
 docker:
   service.running:
     - enable: True
+    - watch:
+      - file: docker_daemon
 {% else %}

@@ -135,3 +135,8 @@ fail() {
   echo "Exiting."
   exit 1
 }
+
+get_random_value() {
+  length=${1:-20}
+  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
+}

@@ -19,8 +19,7 @@
 #
 # Purpose: This script will allow you to test your elastalert rule without entering the Docker container.
-. /usr/sbin/so-elastic-common
+HOST_RULE_DIR=/opt/so/rules/elastalert
 OPTIONS=""
 SKIP=0
 RESULTS_TO_LOG="n"
@@ -29,111 +28,109 @@ FILE_SAVE_LOCATION=""
 usage()
 {
 cat <<EOF
 Test Elastalert Rule
 Options:
 -h                  This message
 -a                  Trigger real alerts instead of the debug alert
 -l <path_to_file>   Write results to specified log file
 -o '<options>'      Specify Elastalert options ( Ex. --schema-only , --count-only, --days N )
--r <rule_name>      Specify path/name of rule to test
+-r <rule_name>      Specify filename of rule to test (must exist in $HOST_RULE_DIR; do not include path)
 EOF
 }
 while getopts "hal:o:r:" OPTION
 do
   case $OPTION in
     h)
       usage
       exit 0
       ;;
     a)
       OPTIONS="--alert"
       ;;
     l)
       RESULTS_TO_LOG="y"
       FILE_SAVE_LOCATION=$OPTARG
       ;;
     o)
       OPTIONS=$OPTARG
       ;;
     r)
       RULE_NAME=$OPTARG
       SKIP=1
       ;;
     *)
       usage
       exit 0
       ;;
   esac
 done
 docker_exec(){
-  if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
-    docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS" > $FILE_SAVE_LOCATION
+  CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/config/elastalert_config.yaml $OPTIONS"
+  if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
+    $CMD > "$FILE_SAVE_LOCATION"
   else
-    docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS"
+    $CMD
   fi
 }
 rule_prompt(){
-  CURRENT_RULES=$(find /opt/so/rules/elastalert -name "*.yaml")
+  CURRENT_RULES=$(cd "$HOST_RULE_DIR" && find . -type f \( -name "*.yaml" -o -name "*.yml" \) | sed -e 's/^\.\///')
+  if [ -z "$CURRENT_RULES" ]; then
+    echo "There are no rules available to test. Rule files must be placed in the $HOST_RULE_DIR directory."
+    exit 1
+  fi
   echo
   echo "This script will allow you to test an Elastalert rule."
   echo
-  echo "Below is a list of active Elastalert rules:"
+  echo "Below is a list of available Elastalert rules:"
   echo
   echo "-----------------------------------"
   echo
   echo "$CURRENT_RULES"
   echo
   echo "-----------------------------------"
   echo
-  echo "Note: To test a rule it must be accessible by the Elastalert Docker container."
-  echo
-  echo "Make sure to swap the local path (/opt/so/rules/elastalert/) for the docker path (/etc/elastalert/rules/)"
-  echo "Example: /opt/so/rules/elastalert/nids2hive.yaml would be /etc/elastalert/rules/nids2hive.yaml"
-  echo
-  while [ -z $RULE_NAME ]; do
-    echo "Please enter the file path and rule name you want to test."
-    read -e RULE_NAME
+  while [ -z "$RULE_NAME" ]; do
+    read -p "Please enter the rule filename you want to test (filename only, no path): " -e RULE_NAME
   done
 }
 log_save_prompt(){
   RESULTS_TO_LOG=""
-  while [ -z $RESULTS_TO_LOG ]; do
-    echo "The results can be rather long. Would you like to write the results to a file? (Y/N)"
-    read RESULTS_TO_LOG
-  done
+  read -p "The results can be rather long. Would you like to write the results to a file? (y/N) " -e RESULTS_TO_LOG
 }
 log_path_prompt(){
-  while [ -z $FILE_SAVE_LOCATION ]; do
-    echo "Please enter the file path and file name."
-    read -e FILE_SAVE_LOCATION
+  while [ -z "$FILE_SAVE_LOCATION" ]; do
+    read -p "Please enter the log file path and file name: " -e FILE_SAVE_LOCATION
   done
   echo "Depending on the rule this may take a while."
 }
 if [ $SKIP -eq 0 ]; then
   rule_prompt
   log_save_prompt
-  if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
+  if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
     log_path_prompt
   fi
 fi
-docker_exec
-if [ $? -eq 0 ]; then
+echo
+docker_exec
+RESULT=$?
+echo
+if [ $RESULT -eq 0 ]; then
   echo "Test completed successfully!"
 else
-  echo "Something went wrong..."
+  echo "Test failed."
 fi
 echo
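A non-interactive invocation of this tester (script name inferred from its purpose as so-elastalert-test; rule filename hypothetical, and per the -r help text it must live in /opt/so/rules/elastalert) would look roughly like:
```
sudo so-elastalert-test -r my_rule.yaml -l /tmp/elastalert-test.log
```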


@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
/usr/sbin/so-restart elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-restart kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-restart logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
/usr/sbin/so-restart filebeat $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-restart curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-restart elastalert $1
{%- endif %}


@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
/usr/sbin/so-start elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-start kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-start logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
/usr/sbin/so-start filebeat $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-start curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-start elastalert $1
{%- endif %}


@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
/usr/sbin/so-stop elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-stop kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-stop logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
/usr/sbin/so-stop filebeat $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
/usr/sbin/so-stop curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-stop elastalert $1
{%- endif %}
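Taken together, these three role-aware wrappers let an operator cycle every Elastic component appropriate to the node in one command; any extra argument is passed through to the underlying so-restart/so-start/so-stop helpers. A usage sketch, with the script names inferred from the commands they wrap:
```
# Restart every Elastic component that applies to this node's role
sudo so-elastic-restart
```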

@@ -15,8 +15,8 @@ if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
   salt-call state.apply redis queue=True >> /root/fleet-setup.log
 fi
-docker exec so-fleet fleetctl config set --address https://localhost:8080 --tls-skip-verify --url-prefix /fleet
-docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://localhost:8080/fleet)" != "301" ]]; do sleep 5; done'
+docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet
+docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done'
 docker exec so-fleet fleetctl setup --email $1 --password $2
 docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
@@ -26,9 +26,9 @@ docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/pac
 docker exec so-fleet fleetctl apply -f /packs/osquery-config.conf
-# Enable Fleet
-echo "Enabling Fleet..."
-salt-call state.apply fleet.event_enable-fleet queue=True >> /root/fleet-setup.log
+# Update the Enroll Secret
+echo "Updating the Enroll Secret..."
+salt-call state.apply fleet.event_update-enroll-secret queue=True >> /root/fleet-setup.log
 salt-call state.apply nginx queue=True >> /root/fleet-setup.log
 # Generate osquery install packages

@@ -19,76 +19,82 @@
 IMAGEREPO=securityonion
 container_list() {
   MANAGERCHECK=$1
   if [ -z "$MANAGERCHECK" ]; then
     MANAGERCHECK=so-unknown
     if [ -f /etc/salt/grains ]; then
       MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
     fi
   fi
   if [ $MANAGERCHECK == 'so-import' ]; then
-    TRUSTED_CONTAINERS=( \
+    TRUSTED_CONTAINERS=(
       "so-elasticsearch"
       "so-filebeat"
       "so-idstools"
       "so-kibana"
       "so-kratos"
       "so-nginx"
       "so-pcaptools"
       "so-soc"
       "so-steno"
       "so-suricata"
-      "so-zeek" )
+      "so-zeek"
+    )
   elif [ $MANAGERCHECK != 'so-helix' ]; then
-    TRUSTED_CONTAINERS=( \
+    TRUSTED_CONTAINERS=(
       "so-acng"
       "so-curator"
       "so-domainstats"
       "so-elastalert"
       "so-elasticsearch"
       "so-filebeat"
       "so-fleet"
       "so-fleet-launcher"
       "so-freqserver"
       "so-grafana"
       "so-idstools"
       "so-influxdb"
       "so-kibana"
       "so-kratos"
       "so-logstash"
       "so-minio"
       "so-mysql"
       "so-nginx"
       "so-pcaptools"
       "so-playbook"
       "so-redis"
       "so-soc"
       "so-soctopus"
       "so-steno"
       "so-strelka-backend"
       "so-strelka-filestream"
       "so-strelka-frontend"
       "so-strelka-manager"
       "so-suricata"
       "so-telegraf"
       "so-thehive"
       "so-thehive-cortex"
       "so-thehive-es"
       "so-wazuh"
-      "so-zeek" )
+      "so-zeek"
+    )
   else
-    TRUSTED_CONTAINERS=( \
-      "so-filebeat" \
-      "so-idstools" \
-      "so-logstash" \
-      "so-nginx" \
-      "so-redis" \
-      "so-steno" \
-      "so-suricata" \
-      "so-telegraf" \
-      "so-zeek" )
+    TRUSTED_CONTAINERS=(
+      "so-filebeat"
+      "so-idstools"
+      "so-elasticsearch"
+      "so-logstash"
+      "so-nginx"
+      "so-redis"
+      "so-steno"
+      "so-suricata"
+      "so-soc"
+      "so-telegraf"
+      "so-zeek"
+    )
   fi
 }
 update_docker_containers() {

@@ -27,8 +27,7 @@ function usage {
 cat << EOF
 Usage: $0 <pcap-file-1> [pcap-file-2] [pcap-file-N]
-Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and
-made available for review in the Security Onion toolset.
+Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and made available for review in the Security Onion toolset.
 EOF
 }
@@ -218,6 +217,6 @@ https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%
 or you can manually set your Time Range to be (in UTC):
 From: $START_OLDEST  To: $END_NEWEST
-Please note that it may take 30 seconds or more for events to appear in Onion Hunt.
+Please note that it may take 30 seconds or more for events to appear in Hunt.
 EOF
 fi

salt/common/tools/sbin/so-ip-update Normal file → Executable file

@@ -39,6 +39,7 @@ fi
 echo "About to change old IP $OLD_IP to new IP $NEW_IP."
+echo
 read -n 1 -p "Would you like to continue? (y/N) " CONTINUE
 echo
@@ -50,9 +51,12 @@ if [ "$CONTINUE" == "y" ]; then
   echo "The IP has been changed from $OLD_IP to $NEW_IP."
-  if [ -z "$SKIP_STATE_APPLY" ]; then
-    echo "Re-applying salt states."
-    salt-call state.highstate queue=True
+  echo
+  read -n 1 -p "The system must reboot to ensure all services have restarted with the new configuration. Reboot now? (y/N)" CONTINUE
+  echo
+  if [ "$CONTINUE" == "y" ]; then
+    reboot
   fi
 else
   echo "Exiting without changes."


@@ -0,0 +1,18 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
$(dirname $0)/so-import-pcap "$@"

salt/common/tools/sbin/so-playbook-reset Normal file → Executable file


@@ -10,4 +10,4 @@ got_root() {
 }
 got_root
-docker exec so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat'
+docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"

salt/common/tools/sbin/so-salt-minion-check Normal file → Executable file

salt/common/tools/sbin/so-ssh-harden Normal file → Executable file

@@ -2,48 +2,92 @@
 . /usr/sbin/so-common
-if [[ $1 =~ ^(q|--quiet) ]]; then
+if [[ $1 =~ ^(-q|--quiet) ]]; then
   quiet=true
 fi
+
+before=
+after=
+reload_required=false
+
 print_sshd_t() {
   local string=$1
   local state=$2
   echo "${state}:"
-  sshd -T | grep "^${string}"
+
+  local grep_out
+  grep_out=$(sshd -T | grep "^${string}")
+
+  if [[ $state == "Before" ]]; then
+    before=$grep_out
+  else
+    after=$grep_out
+  fi
+
+  echo $grep_out
+}
+
+print_msg() {
+  local msg=$1
+  if ! [[ $quiet ]]; then
+    printf "%s\n" \
+      "----" \
+      "$msg" \
+      "----" \
+      ""
+  fi
 }
 if ! [[ $quiet ]]; then print_sshd_t "ciphers" "Before"; fi
 sshd -T | grep "^ciphers" | sed -e "s/\(3des-cbc\|aes128-cbc\|aes192-cbc\|aes256-cbc\|arcfour\|arcfour128\|arcfour256\|blowfish-cbc\|cast128-cbc\|rijndael-cbc@lysator.liu.se\)\,\?//g" >> /etc/ssh/sshd_config
 if ! [[ $quiet ]]; then
   print_sshd_t "ciphers" "After"
   echo ""
 fi
+if [[ $before != $after ]]; then
+  reload_required=true
+fi
 if ! [[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi
 sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config
 if ! [[ $quiet ]]; then
   print_sshd_t "kexalgorithms" "After"
   echo ""
 fi
+if [[ $before != $after ]]; then
+  reload_required=true
+fi
 if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi
 sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config
 if ! [[ $quiet ]]; then
   print_sshd_t "macs" "After"
   echo ""
 fi
+if [[ $before != $after ]]; then
+  reload_required=true
+fi
 if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi
 sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config
 if ! [[ $quiet ]]; then
   print_sshd_t "hostkeyalgorithms" "After"
   echo ""
 fi
+if [[ $before != $after ]]; then
+  reload_required=true
+fi
+
+if [[ $reload_required == true ]]; then
+  print_msg "Reloading sshd to load config changes..."
+  systemctl reload sshd
+fi
 {% if grains['os'] != 'CentOS' %}
-echo "----"
-echo "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
-echo "----"
+print_msg "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
 {% endif %}


@@ -0,0 +1,63 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
TESTRULE=$1
TESTPCAP=$2
. /usr/sbin/so-common
echo ""
echo "==============="
echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
echo ""
sleep 3
rm -rf /tmp/nids-testing/output
mkdir -p /tmp/nids-testing/output
chown suricata:socore /tmp/nids-testing/output
mkdir -p /tmp/nids-testing/rules
cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
cat $TESTRULE >> /tmp/nids-testing/rules/all.rules
echo "==== Begin Suricata Output ==="
docker run --rm \
-v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
-v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
-v /tmp/nids-testing/rules:/etc/suricata/rules:ro \
-v "$TESTPCAP:/input.pcap:ro" \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
-v /tmp/nids-testing/output/:/nsm/:rw \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -v -k none -r /input.pcap -l /tmp --init-errors-fatal
echo "==== End Suricata Output ==="
echo ""
echo "If any alerts hit, they will be displayed below:"
echo ""
cat /tmp/nids-testing/output/* | jq
echo ""
echo "End so-suricata-testrule"
echo "==============="
echo ""
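Given the two positional parameters above (TESTRULE, TESTPCAP), an invocation sketch with hypothetical file paths:
```
sudo so-suricata-testrule /tmp/test.rules /tmp/sample.pcap
```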

salt/common/tools/sbin/so-wazuh-user-add Normal file → Executable file

salt/common/tools/sbin/so-wazuh-user-passwd Normal file → Executable file

salt/common/tools/sbin/so-wazuh-user-remove Normal file → Executable file

@@ -16,6 +16,8 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
+echo "Starting to check for yara rule updates at $(date)..."
+
 output_dir="/opt/so/saltstack/default/salt/strelka/rules"
 mkdir -p $output_dir
 repos="$output_dir/repos.txt"
@@ -27,6 +29,7 @@ updatecounter=0
 {% if ISAIRGAP is sameas true %}
+echo "Airgap mode enabled."
 clone_dir="/nsm/repo/rules/strelka"
 repo_name="signature-base"
@@ -73,17 +76,17 @@ done
 echo "Done!"
 if [ "$newcounter" -gt 0 ];then
   echo "$newcounter new rules added."
 fi
 if [ "$updatecounter" -gt 0 ];then
   echo "$updatecounter rules updated."
 fi
 if [ "$deletecounter" -gt 0 ];then
   echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
 fi
 {% else %}
@@ -162,4 +165,6 @@ else
   echo "No connectivity to Github...exiting..."
   exit 1
 fi
-{%- endif -%}
+{% endif %}
+
+echo "Finished rule updates at $(date)..."

@@ -155,6 +155,14 @@ copy_new_files() {
   cd /tmp
 }
+
+generate_and_clean_tarballs() {
+  local new_version
+  new_version=$(cat $UPDATE_DIR/VERSION)
+  [ -d /opt/so/repo ] || mkdir -p /opt/so/repo
+  tar -czf "/opt/so/repo/$new_version.tar.gz" "$UPDATE_DIR"
+  find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
+}
+
 highstate() {
   # Run a highstate.
   salt-call state.highstate -l info queue=True
@@ -197,6 +205,7 @@ pillar_changes() {
   [[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
   [[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
   [[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
+  [[ "$INSTALLEDVERSION" == 2.3.0 ]] || [[ "$INSTALLEDVERSION" == 2.3.1 ]] || [[ "$INSTALLEDVERSION" == 2.3.2 ]] || [[ "$INSTALLEDVERSION" == 2.3.10 ]] && 2.3.0_to_2.3.20
 }
 rc1_to_rc2() {
@@ -212,8 +221,8 @@ rc1_to_rc2() {
   sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
   # Adding play values to the global.sls
-  local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
-  local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
+  local HIVEPLAYSECRET=$(get_random_value)
+  local CORTEXPLAYSECRET=$(get_random_value)
   sed -i "/^global:/a \\  hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
   sed -i "/^global:/a \\  cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
@@ -231,7 +240,7 @@ rc1_to_rc2() {
   while read p; do
     local NAME=$(echo $p | awk '{print $1}')
     local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
     local IP=$(echo $p | awk '{print $2}')
     echo "Adding the new cross cluster config for $NAME"
     curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
   done </tmp/nodes.txt
@@ -275,9 +284,50 @@ rc3_to_2.3.0() {
   sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
   {
-    echo "playbook_admin: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
-    echo "playbook_automation: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
+    echo "playbook_admin: $(get_random_value)"
+    echo "playbook_automation: $(get_random_value)"
   } >> /opt/so/saltstack/local/pillar/secrets.sls
+  INSTALLEDVERSION=2.3.0
+}
+
+2.3.0_to_2.3.20(){
+  # Remove PCAP from global
+  sed -i '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
+  sed -i '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls
+  # Add checkin interval to global
+  echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls
+  echo "  node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls
+  # Update pillar files for new sensoroni functionality
+  for file in /opt/so/saltstack/local/pillar/minions/*; do
+    echo "sensoroni:" >> $file
+    echo "  node_description:" >> $file
+    local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2}')
+    echo "  node_address: $SOMEADDRESS" >> $file
+  done
+  # Remove old firewall config to reduce confusion
+  rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls
+  # Fix daemon.json by managing it
+  echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
+  DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk '{print $3}' | cut -f1 -d"," | tr -d '"')
+  if [ -z "$DOCKERGREP" ]; then
+    echo "  range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
+    echo "  bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
+  else
+    DOCKERSTUFF="${DOCKERGREP//\"}"
+    DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+    echo "  range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
+    echo "  bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls
+  fi
+  INSTALLEDVERSION=2.3.20
+}
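For reference (not part of the diff), on an install whose daemon.json has no existing address pool, the migration above appends roughly the following to /opt/so/saltstack/local/pillar/global.sls:
```
sensoroni:
  node_checkin_interval_ms: 10000
docker:
  range: '172.17.0.0/24'
  bip: '172.17.0.1/24'
```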
 space_check() {
@@ -292,6 +342,29 @@ space_check() {
 }
+
+thehive_maint() {
+  echo -n "Waiting for TheHive..."
+  COUNT=0
+  THEHIVE_CONNECTED="no"
+  while [[ "$COUNT" -le 240 ]]; do
+    curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
+    if [ $? -eq 0 ]; then
+      THEHIVE_CONNECTED="yes"
+      echo "connected!"
+      break
+    else
+      ((COUNT+=1))
+      sleep 1
+      echo -n "."
+    fi
+  done
+  if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+    echo "Migrating thehive databases if needed."
+    curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
+    curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
+  fi
+}
+
 unmount_update() {
   cd /tmp
   umount /tmp/soagupdate
@@ -371,11 +444,18 @@ verify_latest_update_script() {
   # Check to see if the update scripts match. If not run the new one.
   CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
   GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
-  if [[ "$CURRENTSOUP" == "$GITSOUP" ]]; then
+  CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
+  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
+  CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
+  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
+  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
     echo "This version of the soup script is up to date. Proceeding."
   else
-    echo "You are not running the latest soup version. Updating soup."
+    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
     cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
+    cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
+    cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
     salt-call state.apply common queue=True
     echo ""
     echo "soup has been updated. Please run soup again."
@@ -415,19 +495,24 @@ if [ $is_airgap -eq 0 ]; then
   airgap_mounted
 else
   echo "Cloning Security Onion github repo into $UPDATE_DIR."
+  echo "Removing previous upgrade sources."
+  rm -rf $UPDATE_DIR
   clone_to_tmp
 fi
-if [ -f /usr/sbin/so-image-common ]; then
-  . /usr/sbin/so-image-common
-else
-  add_common
-fi
 echo ""
 echo "Verifying we have the latest soup script."
 verify_latest_update_script
 echo ""
+echo "Generating new repo archive"
+generate_and_clean_tarballs
+
+if [ -f /usr/sbin/so-image-common ]; then
+  . /usr/sbin/so-image-common
+else
+  add_common
+fi
+
 echo "Let's see if we need to update Security Onion."
 upgrade_check
 space_check
@@ -444,6 +529,16 @@ if [ $is_airgap -eq 0 ]; then
 else
   update_registry
   update_docker_containers "soup"
+  FEATURESCHECK=$(lookup_pillar features elastic)
+  if [[ "$FEATURESCHECK" == "True" ]]; then
+    TRUSTED_CONTAINERS=(
+      "so-elasticsearch"
+      "so-filebeat"
+      "so-kibana"
+      "so-logstash"
+    )
+    update_docker_containers "features" "-features"
+  fi
 fi
 echo ""
 echo "Stopping Salt Minion service."
@@ -532,14 +627,15 @@ echo "Running a highstate. This could take several minutes."
 salt-call state.highstate -l info queue=True
 playbook
 unmount_update
+thehive_maint
 if [ "$UPGRADESALT" == "1" ]; then
   echo ""
   echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
   if [ $is_airgap -eq 0 ]; then
-    salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all"
+    salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all"
   fi
-  salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True
+  salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True
   echo ""
 fi

@@ -1,6 +1,6 @@
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set MANAGER = salt['grains.get']('master') %}
-{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1']%}
+{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2']%}
 {% for VERSION in OLDVERSIONS %}
 remove_images_{{ VERSION }}:

@@ -1,18 +1,19 @@
 {%- set NODE_ROUTE_TYPE = salt['pillar.get']('elasticsearch:node_route_type', 'hot') %}
-{%- if salt['pillar.get']('elasticsearch:hot_warm_enabled') or salt['pillar.get']('elasticsearch:true_cluster') %}
-{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name', '') %}
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip') %}
+{%- set FEATURES = salt['pillar.get']('elastic:features', False) %}
+{%- set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
+{%- if TRUECLUSTER is sameas true %}
+{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name') %}
 {%- else %}
-{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %}
+{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername') %}
 {%- endif %}
-{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 cluster.name: "{{ ESCLUSTERNAME }}"
 network.host: 0.0.0.0
 # minimum_master_nodes need to be explicitly set when bound on a public IP
 # set to 1 to allow single node clusters
 # Details: https://github.com/elastic/elasticsearch/pull/17288
-discovery.zen.minimum_master_nodes: 1
+#discovery.zen.minimum_master_nodes: 1
 # This is a test -- if this is here, then the volume is mounted correctly.
 path.logs: /var/log/elasticsearch
 action.destructive_requires_name: true
@@ -37,10 +38,30 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98%
 #xpack.security.http.ssl.client_authentication: none
 #xpack.security.authc:
 #  anonymous:
 #    username: anonymous_user
 #    roles: superuser
 #    authz_exception: true
 {%- endif %}
-node.attr.box_type: {{ NODE_ROUTE_TYPE }}
-node.name: {{ ESCLUSTERNAME }}
+node.name: {{ grains.host }}
 script.max_compilations_rate: 1000/1m
+{%- if TRUECLUSTER is sameas true %}
+{%- if grains.role == 'so-manager' %}
+{%- if salt['pillar.get']('nodestab', {}) %}
+node.roles: [ master, data, remote_cluster_client ]
+discovery.seed_hosts:
+  - {{ grains.master }}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
+  - {{ SN.split('_')|first }}
+{%- endfor %}
+{%- endif %}
+{%- else %}
+node.roles: [ data, ingest ]
+node.attr.box_type: {{ NODE_ROUTE_TYPE }}
+discovery.seed_hosts:
+  - {{ grains.master }}
+{%- endif %}
+{%- endif %}
+{%- if TRUECLUSTER is sameas false %}
+node.attr.box_type: {{ NODE_ROUTE_TYPE }}
+{%- endif %}
+indices.query.bool.max_clause_count: 1500
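As a reading aid (not part of the commit), on a node where true_cluster is false the template above renders to roughly the following; the cluster name and hostname values are placeholders:
```
cluster.name: "securityonion"
network.host: 0.0.0.0
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
node.name: sensor1
script.max_compilations_rate: 1000/1m
node.attr.box_type: hot
indices.query.bool.max_clause_count: 1500
```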

@@ -6,7 +6,7 @@
     { "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
     { "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } },
     { "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } },
-    { "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } },
+    { "rename": { "field": "temp.EventData", "target_field": "winlog.event_data", "ignore_missing": true } },
     { "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
     { "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
     { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
@@ -22,4 +22,4 @@
     { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false } },
     { "pipeline": { "name": "common" } }
   ]
 }

@@ -63,7 +63,7 @@
     { "rename": { "field": "fields.module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
     { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
     { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name": "win.eventlogs" } },
-    { "set": { "if": "ctx.containsKey('rule') && ctx.rule != null", "field": "event.dataset", "value": "alert", "override": true } },
+    { "set": { "if": "ctx.rule != null && ctx.rule.name != null", "field": "event.dataset", "value": "alert", "override": true } },
     { "pipeline": { "name": "common" } }
   ]
 }


@@ -0,0 +1,10 @@
{
"description" : "suricata.ftp_data",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.ftp_data.command", "target_field": "ftp.command", "ignore_missing": true } },
{ "rename": { "field": "message2.ftp_data.filename","target_field": "ftp.argument", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}
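Not part of the commit, but processor lists like this can be sanity-checked with Elasticsearch's pipeline simulate API before deployment; the sample document below is illustrative:
```
POST _ingest/pipeline/_simulate
{
  "pipeline": { "processors": [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } }
  ] },
  "docs": [ { "_source": { "message2": { "proto": "TCP" } } } ]
}
```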

@@ -6,7 +6,7 @@
     { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
     { "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
     { "set": { "field": "event.category", "value": "host", "override": true } },
-    { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_failure": true, "ignore_missing": true } },
     { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }
   ]
 }

@@ -18,6 +18,10 @@
 {%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {%- set MANAGER = salt['grains.get']('master') %}
 . /usr/sbin/so-common
+
+# Exit on errors, since all lines must succeed
+set -e
+
 # Check to see if we have extracted the ca cert.
 if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then
   docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt

View File

@@ -21,23 +21,26 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%} {% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
{% set MANAGERIP = salt['pillar.get']('global:managerip') %}
{% if FEATURES is sameas true %}
{%- if FEATURES is sameas true %}
{% set FEATUREZ = "-features" %} {% set FEATUREZ = "-features" %}
{% else %} {% else %}
{% set FEATUREZ = '' %} {% set FEATUREZ = '' %}
{% endif %} {% endif %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %} {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
{% set esclustername = salt['pillar.get']('manager:esclustername', '') %} {% set esclustername = salt['pillar.get']('manager:esclustername') %}
{% set esheap = salt['pillar.get']('manager:esheap', '') %} {% set esheap = salt['pillar.get']('manager:esheap') %}
{% set ismanager = True %} {% set ismanager = True %}
{% elif grains['role'] in ['so-node','so-heavynode'] %} {% elif grains['role'] in ['so-node','so-heavynode'] %}
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %} {% set esclustername = salt['pillar.get']('elasticsearch:esclustername') %}
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %} {% set esheap = salt['pillar.get']('elasticsearch:esheap') %}
{% set ismanager = False %} {% set ismanager = False %}
{% elif grains['role'] == 'so-helix' %}
{% set ismanager = True %} {# Solely for the sake of running so-catrust #}
{% endif %} {% endif %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
@@ -86,6 +89,8 @@ capemz:
- user: 939 - user: 939
- group: 939 - group: 939
{% if grains['role'] != 'so-helix' %}
# Add ES Group # Add ES Group
elasticsearchgroup: elasticsearchgroup:
group.present: group.present:
@@ -188,16 +193,21 @@ so-elasticsearch:
- name: so-elasticsearch - name: so-elasticsearch
- user: elasticsearch - user: elasticsearch
- extra_hosts: - extra_hosts:
{% if ismanager %}
- {{ grains.host }}:{{ NODEIP }} - {{ grains.host }}:{{ NODEIP }}
{%- if ismanager %} {% if salt['pillar.get']('nodestab', {}) %}
{%- if salt['pillar.get']('nodestab', {}) %} {% for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
- {{ SN.split('_')|first }}:{{ SNDATA.ip }} - {{ SN.split('_')|first }}:{{ SNDATA.ip }}
{%- endfor %} {% endfor %}
{%- endif %} {% endif %}
{%- endif %} {% else %}
- {{ grains.host }}:{{ NODEIP }}
- {{ MANAGER }}:{{ MANAGERIP }}
{% endif %}
- environment: - environment:
{% if TRUECLUSTER is sameas false or (TRUECLUSTER is sameas true and not salt['pillar.get']('nodestab', {})) %}
- discovery.type=single-node - discovery.type=single-node
{% endif %}
- ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }} - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }}
ulimits: ulimits:
- memlock=-1:-1 - memlock=-1:-1
@@ -251,10 +261,12 @@ so-elasticsearch-templates:
- template: jinja - template: jinja
{% endif %} {% endif %}
{% endif %} {# if grains['role'] != 'so-helix' #}
{% else %} {% else %}
elasticsearch_state_not_allowed: elasticsearch_state_not_allowed:
test.fail_without_changes: test.fail_without_changes:
- name: elasticsearch_state_not_allowed - name: elasticsearch_state_not_allowed
{% endif %} {% endif %} {# if 'elasticsearch' in top_states #}
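The key behavioral change in this state is the new `TRUECLUSTER` pillar gate: `discovery.type=single-node` is now emitted only when true clustering is off, or when it is on but no search nodes have registered yet via the `nodestab` pillar. A minimal jinja2 sketch of that gate, with `NODESTAB` standing in for the `salt['pillar.get']('nodestab', {})` lookup; the node key/IP below are hypothetical:

```python
# Minimal sketch of the single-node gating logic added above: Elasticsearch
# stays in single-node mode unless true clustering is enabled AND at least
# one search node has checked in (non-empty nodestab pillar).
from jinja2 import Template

tpl = Template("""
{%- if TRUECLUSTER is sameas false or (TRUECLUSTER is sameas true and not NODESTAB) %}
- discovery.type=single-node
{%- endif %}
""")

cases = [
    (False, {}),                                      # clustering disabled
    (True, {}),                                       # enabled, no nodes yet
    (True, {"node1_searchnode": {"ip": "10.0.0.5"}}), # enabled, node joined
]
for truecluster, nodestab in cases:
    out = tpl.render(TRUECLUSTER=truecluster, NODESTAB=nodestab).strip()
    print(f"true_cluster={truecluster}, nodes={bool(nodestab)} -> "
          f"{out or '(clustered: no single-node flag)'}")
```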

View File

@@ -1,6 +1,7 @@
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} {% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% import_yaml 'firewall/portgroups.yaml' as portgroups %} {% import_yaml 'firewall/portgroups.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.aliases.ports %} {% set portgroups = portgroups.firewall.aliases.ports %}
{% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
role: role:
eval: eval:
@@ -32,9 +33,9 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
search_node: search_node:
@@ -42,6 +43,11 @@ role:
- {{ portgroups.redis }} - {{ portgroups.redis }}
- {{ portgroups.minio }} - {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }} - {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self: self:
portgroups: portgroups:
- {{ portgroups.syslog}} - {{ portgroups.syslog}}
@@ -121,12 +127,12 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
{% if ISAIRGAP is sameas true %} {% if ISAIRGAP is sameas true %}
- {{ portgroups.yum }} - {{ portgroups.yum }}
{% endif %} {% endif %}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
search_node: search_node:
@@ -135,6 +141,12 @@ role:
- {{ portgroups.minio }} - {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }} - {{ portgroups.elasticsearch_node }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.beats_5644 }}
self: self:
portgroups: portgroups:
- {{ portgroups.syslog}} - {{ portgroups.syslog}}
@@ -208,10 +220,10 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
- {{ portgroups.yum }} - {{ portgroups.yum }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
search_node: search_node:
@@ -219,6 +231,11 @@ role:
- {{ portgroups.redis }} - {{ portgroups.redis }}
- {{ portgroups.minio }} - {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }} - {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self: self:
portgroups: portgroups:
- {{ portgroups.syslog}} - {{ portgroups.syslog}}
@@ -292,10 +309,10 @@ role:
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }} - {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
- {{ portgroups.yum }} - {{ portgroups.yum }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
search_node: search_node:
@@ -303,6 +320,11 @@ role:
- {{ portgroups.redis }} - {{ portgroups.redis }}
- {{ portgroups.minio }} - {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }} - {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self: self:
portgroups: portgroups:
- {{ portgroups.syslog}} - {{ portgroups.syslog}}
@@ -372,9 +394,9 @@ role:
- {{ portgroups.osquery_8080 }} - {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }} - {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }} - {{ portgroups.wazuh_api }}
- {{ portgroups.sensoroni }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
search_node: search_node:
@@ -425,6 +447,11 @@ role:
elasticsearch_rest: elasticsearch_rest:
portgroups: portgroups:
- {{ portgroups.elasticsearch_rest }} - {{ portgroups.elasticsearch_rest }}
{% if TRUE_CLUSTER %}
search_node:
portgroups:
- {{ portgroups.elasticsearch_node }}
{% endif %}
self: self:
portgroups: portgroups:
- {{ portgroups.syslog}} - {{ portgroups.syslog}}
@@ -533,11 +560,11 @@ role:
minion: minion:
portgroups: portgroups:
- {{ portgroups.docker_registry }} - {{ portgroups.docker_registry }}
- {{ portgroups.sensoroni }}
sensor: sensor:
portgroups: portgroups:
- {{ portgroups.beats_5044 }} - {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }} - {{ portgroups.beats_5644 }}
- {{ portgroups.sensoroni }}
search_node: search_node:
portgroups: portgroups:
- {{ portgroups.redis }} - {{ portgroups.redis }}
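Across these firewall hunks, heavy nodes get their own `heavy_node` portgroup set (redis, minio, elasticsearch_node, plus beats_5644 where sensors ship logs directly), sensoroni moves from the sensor list to the manager-facing side, and search nodes are opened to each other only when `TRUE_CLUSTER` is set. A minimal PyYAML sketch of how such a role list dereferences the alias file; the structure mirrors `portgroups.firewall.aliases.ports` from the hunk, but the port numbers are placeholders:

```python
# Minimal sketch: resolve a heavy_node portgroup list against a portgroup
# alias map, mimicking how the Jinja above dereferences portgroups.<name>.
# Port numbers are illustrative placeholders, not the repo's actual values.
import yaml

aliases_yaml = """
firewall:
  aliases:
    ports:
      redis:              {tcp: [6379]}
      minio:              {tcp: [9000]}
      elasticsearch_node: {tcp: [9300]}
      beats_5644:         {tcp: [5644]}
"""

ports = yaml.safe_load(aliases_yaml)["firewall"]["aliases"]["ports"]
heavy_node = ["redis", "minio", "elasticsearch_node", "beats_5644"]
for name in heavy_node:
    print(name, "->", ports[name])
```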

View File

@@ -95,6 +95,7 @@ enable_docker_user_established:
- match: conntrack - match: conntrack
- ctstate: 'RELATED,ESTABLISHED' - ctstate: 'RELATED,ESTABLISHED'
{% set count = namespace(value=0) %}
{% for chain, hg in assigned_hostgroups.chain.items() %} {% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %} {% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{% for action in ['insert', 'delete' ] %} {% for action in ['insert', 'delete' ] %}
@@ -103,8 +104,9 @@ enable_docker_user_established:
{% for portgroup in portgroups.portgroups %} {% for portgroup in portgroups.portgroups %}
{% for proto, ports in portgroup.items() %} {% for proto, ports in portgroup.items() %}
{% for port in ports %} {% for port in ports %}
{% set count.value = count.value + 1 %}
{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}: {{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}_{{count.value}}:
iptables.{{action}}: iptables.{{action}}:
- table: filter - table: filter
- chain: {{ chain }} - chain: {{ chain }}
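This hunk threads a Jinja `namespace()` counter through the nested loops so each generated iptables state gets a unique numeric suffix; without it, two hostgroups sharing the same chain/IP/port/proto would render duplicate state IDs and the highstate would fail. A minimal sketch of the pattern (plain `{% set %}` cannot mutate a variable across loop iterations, which is exactly what `namespace` solves):

```python
# Minimal sketch of the Jinja2 namespace() counter pattern used above: the
# namespace object carries mutable state across loop iterations, making each
# rendered state ID unique even when hostgroup/port/proto combinations repeat.
from jinja2 import Template

tpl = Template("""
{%- set count = namespace(value=0) %}
{%- for hostgroup in hostgroups %}
{%- for port in ports %}
{%- set count.value = count.value + 1 %}
insert_DOCKER-USER_{{ hostgroup }}_{{ port }}_tcp_{{ count.value }}
{%- endfor %}
{%- endfor %}
""")

# Both hostgroups expose the same port, yet every generated ID is distinct.
print(tpl.render(hostgroups=["search_node", "heavy_node"], ports=[9300]))
```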

View File

@@ -1,10 +1,3 @@
{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
{% if FLEETNODE or FLEETMANAGER %}
{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret default') %}
{% else %}
{% set ENROLLSECRET = '' %}
{% endif %}
{% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
@@ -14,5 +7,4 @@ so/fleet:
action: 'enablefleet' action: 'enablefleet'
hostname: {{ grains.host }} hostname: {{ grains.host }}
mainip: {{ MAINIP }} mainip: {{ MAINIP }}
role: {{ grains.role }} role: {{ grains.role }}
enroll-secret: {{ ENROLLSECRET }}

View File

@@ -0,0 +1,7 @@
{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret default') %}
so/fleet:
event.send:
- data:
action: 'update-enrollsecret'
enroll-secret: {{ ENROLLSECRET }}
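Taken together, the two fleet hunks stop shelling into the so-fleet container on every event: the regular `enablefleet` event no longer carries the enroll secret, and a dedicated `update-enrollsecret` event fetches it only when actually needed. A minimal Python sketch of that fetch, using the same `docker exec` / `fleetctl` invocation as the state above; publishing onto the Salt event bus is left out:

```python
# Minimal sketch: fetch the Fleet enroll secret the way the state above does
# (docker exec + fleetctl), then build an update-enrollsecret payload.
# Requires a running so-fleet container; event.send itself is not shown.
import subprocess

secret = subprocess.run(
    ["docker", "exec", "so-fleet", "fleetctl", "get", "enroll-secret", "default"],
    capture_output=True, text=True, check=True,
).stdout.strip()

payload = {"action": "update-enrollsecret", "enroll-secret": secret}
print(payload)
```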

File diff suppressed because it is too large

View File

@@ -20,8 +20,43 @@
"links": [], "links": [],
"panels": [ "panels": [
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {},
"unit": "percent",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 60
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": 80
}
]
},
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
},
"overrides": []
},
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
@@ -31,47 +66,16 @@
"id": 2, "id": 2,
"links": [], "links": [],
"options": { "options": {
"fieldOptions": { "alertThreshold": true
"calcs": [
"lastNotNull"
],
"defaults": {
"mappings": [],
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 60
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": [],
"values": false
},
"orientation": "auto",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.6.2", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -119,13 +123,80 @@
"operator": "=", "operator": "=",
"value": "cpu-total" "value": "cpu-total"
} }
] ],
"alias": "Usage"
} }
], ],
"title": "{{ SERVERNAME }} - CPU",
"type": "graph",
"cacheTimeout": null,
"renderer": "flot",
"yaxes": [
{
"label": null,
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "percent",
"$$hashKey": "object:395"
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:396"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"fillGradient": 0,
"linewidth": 1,
"dashes": false,
"hiddenSeries": false,
"dashLength": 10,
"spaceLength": 10,
"points": false,
"pointradius": 2,
"bars": false,
"stack": false,
"percentage": false,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"steppedLine": false,
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "{{ SERVERNAME }} - CPU", "aliasColors": {},
"type": "gauge" "seriesOverrides": [],
"thresholds": [],
"timeRegions": []
}, },
{ {
"datasource": "InfluxDB", "datasource": "InfluxDB",
@@ -260,7 +331,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -400,7 +471,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -540,7 +611,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -622,68 +693,58 @@
} }
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 0, "x": 0,
"y": 5 "y": 5
}, },
"id": 12, "hiddenSeries": false,
"links": [], "id": 73,
"options": { "legend": {
"fieldOptions": { "avg": false,
"calcs": [ "current": false,
"lastNotNull" "max": false,
], "min": false,
"defaults": { "show": false,
"mappings": [ "total": false,
{ "values": false
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ ROOTFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ ROOTFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ ROOTFS * '.90'|float }}"
}
]
},
"unit": "bytes"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.6.2", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -703,7 +764,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -728,72 +789,102 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/)", "title": "{{ SERVERNAME }} - Disk Used(/)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 4, "x": 4,
"y": 5 "y": 5
}, },
"id": 35, "hiddenSeries": false,
"links": [], "id": 74,
"options": { "legend": {
"fieldOptions": { "avg": false,
"calcs": [ "current": false,
"lastNotNull" "max": false,
], "min": false,
"defaults": { "show": false,
"mappings": [ "total": false,
{ "values": false
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ NSMFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ NSMFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ NSMFS * '.90'|float }}"
}
]
},
"unit": "bytes"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.6.2", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -813,7 +904,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -838,8 +929,48 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/nsm)", "title": "{{ SERVERNAME }} - Disk Used(/nsm)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"aliasColors": {}, "aliasColors": {},
@@ -888,7 +1019,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1028,7 +1159,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1168,7 +1299,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1308,7 +1439,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1454,7 +1585,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1504,7 +1635,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1554,7 +1685,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1604,7 +1735,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1654,7 +1785,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1704,7 +1835,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1846,7 +1977,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1910,7 +2041,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2286,7 +2417,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2330,7 +2461,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2374,7 +2505,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2510,7 +2641,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2574,7 +2705,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2734,7 +2865,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2777,7 +2908,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2820,7 +2951,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2863,7 +2994,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2997,7 +3128,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3041,7 +3172,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3084,7 +3215,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3219,7 +3350,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3283,7 +3414,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3434,7 +3565,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3481,7 +3612,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3616,7 +3747,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3751,7 +3882,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3815,7 +3946,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
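Every target in these dashboards swaps the custom `$Interval` template variable for Grafana's built-in `$__interval`, which auto-scales the InfluxQL `GROUP BY time()` window to the panel width and time range. A minimal sketch of the kind of one-shot migration that produces diffs like the above, walking top-level panels only; `dashboard.json` is a placeholder path, not a file in this repo:

```python
# Minimal sketch: rewrite a dashboard JSON so every groupBy time() parameter
# uses Grafana's built-in $__interval instead of a custom $Interval variable.
import json

with open("dashboard.json") as f:        # placeholder path
    dash = json.load(f)

for panel in dash.get("panels", []):
    for target in panel.get("targets", []):
        for group in target.get("groupBy", []):
            if group.get("type") == "time":
                group["params"] = ["$__interval" if p == "$Interval" else p
                                   for p in group.get("params", [])]

with open("dashboard.json", "w") as f:
    json.dump(dash, f, indent=2)
```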

View File

@@ -21,8 +21,43 @@
"links": [], "links": [],
"panels": [ "panels": [
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {},
"unit": "percent",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 60
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": 80
}
]
},
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
},
"overrides": []
},
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
@@ -32,57 +67,16 @@
"id": 2, "id": 2,
"links": [], "links": [],
"options": { "options": {
"fieldOptions": { "alertThreshold": true
"calcs": [
"lastNotNull"
],
"defaults": {
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 100,
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 60
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.7.3", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -130,11 +124,80 @@
"operator": "=", "operator": "=",
"value": "cpu-total" "value": "cpu-total"
} }
] ],
"alias": "Usage"
} }
], ],
"title": "{{ SERVERNAME }} - CPU", "title": "{{ SERVERNAME }} - CPU",
"type": "gauge" "type": "graph",
"cacheTimeout": null,
"renderer": "flot",
"yaxes": [
{
"label": null,
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "percent",
"$$hashKey": "object:395"
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:396"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"fillGradient": 0,
"linewidth": 1,
"dashes": false,
"hiddenSeries": false,
"dashLength": 10,
"spaceLength": 10,
"points": false,
"pointradius": 2,
"bars": false,
"stack": false,
"percentage": false,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"steppedLine": false,
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"timeFrom": null,
"timeShift": null,
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": []
}, },
{ {
"datasource": "InfluxDB", "datasource": "InfluxDB",
@@ -269,7 +332,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -628,68 +691,58 @@
} }
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 0, "x": 0,
"y": 5 "y": 5
}, },
"id": 12, "hiddenSeries": false,
"links": [], "id": 73,
"options": { "legend": {
"fieldOptions": { "avg": false,
"calcs": [ "current": false,
"lastNotNull" "max": false,
], "min": false,
"defaults": { "show": false,
"mappings": [ "total": false,
{ "values": false
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ ROOTFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ ROOTFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ ROOTFS * '.90'|float }}"
}
]
},
"unit": "bytes"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.7.3", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -709,7 +762,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -734,73 +787,102 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/)", "title": "{{ SERVERNAME }} - Disk Used(/)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 4, "x": 4,
"y": 5 "y": 5
}, },
"id": 35, "hiddenSeries": false,
"links": [], "id": 74,
"options": { "legend": {
"fieldOptions": { "avg": false,
"calcs": [ "current": false,
"lastNotNull" "max": false,
], "min": false,
"defaults": { "show": false,
"decimals": 2, "total": false,
"mappings": [ "values": false
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ NSMFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ NSMFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ NSMFS * '.90'|float }}"
}
]
},
"unit": "bytes"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.7.3", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -820,7 +902,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -845,8 +927,48 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/nsm)", "title": "{{ SERVERNAME }} - Disk Used(/nsm)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"aliasColors": {}, "aliasColors": {},
@@ -1034,7 +1156,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1458,7 +1580,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1508,7 +1630,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1558,7 +1680,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1608,7 +1730,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1658,7 +1780,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1708,7 +1830,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1850,7 +1972,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1914,7 +2036,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2062,7 +2184,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2190,7 +2312,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2373,7 +2495,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2417,7 +2539,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2461,7 +2583,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2597,7 +2719,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2661,7 +2783,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2809,7 +2931,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2937,7 +3059,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3072,7 +3194,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3116,7 +3238,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3159,7 +3281,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3495,7 +3617,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3627,7 +3749,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4005,7 +4127,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4135,7 +4257,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4182,7 +4304,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4313,7 +4435,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4453,7 +4575,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4496,7 +4618,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4539,7 +4661,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4582,7 +4704,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
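The disk panels here repeat the conversion seen in the first dashboard: gauges that plotted raw `used` bytes against a templated `{{ ROOTFS }}` / `{{ NSMFS }}` maximum become graphs of Telegraf's `used_percent` field, so thresholds and axes work without baking filesystem sizes into the template. A trivial sketch of why percent removes the templated max; the byte counts are illustrative, not from a real host:

```python
# Minimal sketch: with raw bytes the panel needed the filesystem size to
# scale the gauge; used_percent (reported directly by Telegraf's disk input)
# is already normalized. Values below are illustrative only.
used_bytes, total_bytes = 120 * 2**30, 480 * 2**30   # hypothetical /nsm usage

derived_percent = used_bytes / total_bytes * 100     # needs ROOTFS/NSMFS max
reported_percent = 25.0                              # what used_percent returns

print(f"derived: {derived_percent:.1f}%  vs  reported: {reported_percent:.1f}%")
```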

View File

@@ -20,8 +20,43 @@
"links": [], "links": [],
"panels": [ "panels": [
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {},
"unit": "percent",
"min": 0,
"max": 100,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 60
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": 80
}
]
},
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
},
"overrides": []
},
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
@@ -31,57 +66,16 @@
"id": 2, "id": 2,
"links": [], "links": [],
"options": { "options": {
"fieldOptions": { "alertThreshold": true
"calcs": [
"lastNotNull"
],
"defaults": {
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 100,
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": 60
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.6.2", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -129,11 +123,80 @@
"operator": "=", "operator": "=",
"value": "cpu-total" "value": "cpu-total"
} }
] ],
"alias": "Usage"
} }
], ],
"title": "{{ SERVERNAME }} - CPU", "title": "{{ SERVERNAME }} - CPU",
"type": "gauge" "type": "graph",
"cacheTimeout": null,
"renderer": "flot",
"yaxes": [
{
"label": null,
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "percent",
"$$hashKey": "object:395"
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:396"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"fillGradient": 0,
"linewidth": 1,
"dashes": false,
"hiddenSeries": false,
"dashLength": 10,
"spaceLength": 10,
"points": false,
"pointradius": 2,
"bars": false,
"stack": false,
"percentage": false,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"steppedLine": false,
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"timeFrom": null,
"timeShift": null,
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": []
}, },
{ {
"datasource": "InfluxDB", "datasource": "InfluxDB",
@@ -268,7 +331,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -408,7 +471,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -548,7 +611,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -676,7 +739,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -757,68 +820,58 @@
} }
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 0, "x": 0,
"y": 5 "y": 5
}, },
"id": 12, "hiddenSeries": false,
"links": [], "id": 73,
"options": { "legend": {
"fieldOptions": { "avg": false,
"calcs": [ "current": false,
"lastNotNull" "max": false,
], "min": false,
"defaults": { "show": false,
"mappings": [ "total": false,
{ "values": false
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ ROOTFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ ROOTFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ ROOTFS * '.90'|float }}"
}
]
},
"unit": "bytes"
},
"overrides": [],
"values": false
},
"orientation": "horizontal",
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "6.6.2", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -838,7 +891,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -863,27 +916,102 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/)", "title": "{{ SERVERNAME }} - Disk Used(/)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"cacheTimeout": null, "fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 4, "x": 4,
"y": 5 "y": 5
}, },
"id": 35, "hiddenSeries": false,
"links": [], "id": 74,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -903,7 +1031,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -928,54 +1056,48 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/nsm)", "title": "{{ SERVERNAME }} - Disk Used(/nsm)",
"type": "gauge", "tooltip": {
"options": { "shared": true,
"showThresholdMarkers": true, "sort": 0,
"showThresholdLabels": false, "value_type": "individual"
"fieldOptions": {
"values": false,
"calcs": [
"lastNotNull"
],
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ NSMFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ NSMFS * '.90'|float }}"
}
]
},
"mappings": [
{
"op": "=",
"text": "N/A",
"value": "null",
"id": 0,
"type": 1
}
],
"unit": "bytes",
"nullValueMode": "connected",
"min": 0,
"max": "{{ NSMFS}}",
"decimals": 2
},
"overrides": []
},
"orientation": "horizontal"
}, },
"pluginVersion": "6.6.2" "type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"aliasColors": {}, "aliasColors": {},
@@ -1024,7 +1146,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1152,7 +1274,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1280,7 +1402,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1408,7 +1530,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1548,7 +1670,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1598,7 +1720,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1648,7 +1770,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1698,7 +1820,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1748,7 +1870,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1798,7 +1920,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1976,7 +2098,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2106,7 +2228,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2153,7 +2275,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2340,7 +2462,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2384,7 +2506,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2428,7 +2550,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2568,7 +2690,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2611,7 +2733,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2654,7 +2776,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2697,7 +2819,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2832,7 +2954,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2896,7 +3018,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3051,7 +3173,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3095,7 +3217,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3138,7 +3260,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3269,7 +3391,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3404,7 +3526,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3468,7 +3590,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },

View File

@@ -34,7 +34,8 @@
} }
] ]
}, },
"unit": "s" "unit": "s",
"decimals": 2
}, },
"overrides": [] "overrides": []
}, },
@@ -109,23 +110,13 @@
"type": "stat" "type": "stat"
}, },
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {},
"mappings": [ "unit": "percent",
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 100,
"min": 0, "min": 0,
"nullValueMode": "connected", "max": 100,
"thresholds": { "thresholds": {
"mode": "absolute", "mode": "absolute",
"steps": [ "steps": [
@@ -143,7 +134,16 @@
} }
] ]
}, },
"unit": "percent" "mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
}, },
"overrides": [] "overrides": []
}, },
@@ -156,25 +156,16 @@
"id": 2, "id": 2,
"links": [], "links": [],
"options": { "options": {
"orientation": "horizontal", "alertThreshold": true
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -222,11 +213,80 @@
"operator": "=", "operator": "=",
"value": "cpu-total" "value": "cpu-total"
} }
] ],
"alias": "Usage"
} }
], ],
"title": "{{ SERVERNAME }} - CPU", "title": "{{ SERVERNAME }} - CPU",
"type": "gauge" "type": "graph",
"cacheTimeout": null,
"renderer": "flot",
"yaxes": [
{
"label": null,
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "percent",
"$$hashKey": "object:395"
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:396"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"fillGradient": 0,
"linewidth": 1,
"dashes": false,
"hiddenSeries": false,
"dashLength": 10,
"spaceLength": 10,
"points": false,
"pointradius": 2,
"bars": false,
"stack": false,
"percentage": false,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"steppedLine": false,
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"timeFrom": null,
"timeShift": null,
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": []
}, },
{ {
"aliasColors": {}, "aliasColors": {},
@@ -414,7 +474,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -553,7 +613,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -692,7 +752,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -773,72 +833,58 @@
} }
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {}
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ ROOTFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ ROOTFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ ROOTFS * '.90'|float }}"
}
]
},
"unit": "bytes"
}, },
"overrides": [] "overrides": []
}, },
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 0, "x": 0,
"y": 5 "y": 5
}, },
"id": 12, "hiddenSeries": false,
"links": [], "id": 73,
"options": { "legend": {
"orientation": "horizontal", "avg": false,
"reduceOptions": { "current": false,
"calcs": [ "max": false,
"lastNotNull" "min": false,
], "show": false,
"fields": "", "total": false,
"values": false "values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -858,7 +904,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -883,76 +929,102 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/)", "title": "{{ SERVERNAME }} - Disk Used(/)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {}
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ NSMFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ NSMFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ NSMFS * '.90'|float }}"
}
]
},
"unit": "bytes"
}, },
"overrides": [] "overrides": []
}, },
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 4, "x": 4,
"y": 5 "y": 5
}, },
"id": 31, "hiddenSeries": false,
"links": [], "id": 74,
"options": { "legend": {
"orientation": "horizontal", "avg": false,
"reduceOptions": { "current": false,
"calcs": [ "max": false,
"lastNotNull" "min": false,
], "show": false,
"fields": "", "total": false,
"values": false "values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -972,7 +1044,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -997,28 +1069,58 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/nsm)", "title": "{{ SERVERNAME }} - Disk Used(/nsm)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {},
"decimals": 2, "unit": "s",
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 1209600,
"min": 0, "min": 0,
"nullValueMode": "connected", "max": null,
"decimals": 2,
"thresholds": { "thresholds": {
"mode": "absolute", "mode": "absolute",
"steps": [ "steps": [
@@ -1036,7 +1138,16 @@
} }
] ]
}, },
"unit": "s" "mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
}, },
"overrides": [] "overrides": []
}, },
@@ -1049,25 +1160,16 @@
"id": 22, "id": 22,
"links": [], "links": [],
"options": { "options": {
"orientation": "horizontal", "alertThreshold": true
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1103,12 +1205,82 @@
"operator": "=", "operator": "=",
"value": "{{ SERVERNAME }}" "value": "{{ SERVERNAME }}"
} }
] ],
"alias": "Oldest Pcap"
} }
], ],
"title": "{{ SERVERNAME }} - PCAP Retention", "title": "{{ SERVERNAME }} - PCAP Retention",
"type": "gauge" "type": "graph",
}, "renderer": "flot",
"yaxes": [
{
"label": "",
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "s",
"$$hashKey": "object:643",
"decimals": 2
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:644"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"linewidth": 1,
"dashLength": 10,
"spaceLength": 10,
"pointradius": 2,
"legend": {
"show": true,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": [],
"cacheTimeout": null,
"timeFrom": null,
"timeShift": null,
"fillGradient": 0,
"dashes": false,
"hiddenSeries": false,
"points": false,
"bars": false,
"stack": false,
"percentage": false,
"steppedLine": false
},
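The PCAP Retention panel gets the same gauge-to-graph conversion, with one difference worth noting: it now plots the age of the oldest on-disk pcap in seconds over time (alias "Oldest Pcap") instead of a gauge capped at 1209600 s (14 days). A minimal sketch of computing that age from file mtimes; `/nsm/pcap` is a hypothetical layout used for illustration, not necessarily where Security Onion stores capture files:

```python
# Minimal sketch: age of the oldest pcap in seconds, the quantity this panel
# graphs. The /nsm/pcap path is a hypothetical example location.
import glob
import os
import time

paths = glob.glob("/nsm/pcap/**/*", recursive=True)
files = [p for p in paths if os.path.isfile(p)]
if files:
    oldest_mtime = min(os.path.getmtime(p) for p in files)
    print(f"oldest pcap age: {time.time() - oldest_mtime:.0f} s")
else:
    print("no pcaps found")
```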
{ {
"aliasColors": {}, "aliasColors": {},
"bars": false, "bars": false,
@@ -1162,7 +1334,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1307,7 +1479,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1452,7 +1624,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1609,7 +1781,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1659,7 +1831,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1709,7 +1881,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1759,7 +1931,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1809,7 +1981,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1859,7 +2031,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1999,7 +2171,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2132,7 +2304,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2271,7 +2443,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2410,7 +2582,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2553,7 +2725,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2786,7 +2958,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2850,7 +3022,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3016,7 +3188,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3059,7 +3231,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3102,7 +3274,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3145,7 +3317,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3281,7 +3453,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3329,7 +3501,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3475,7 +3647,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3685,7 +3857,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3729,7 +3901,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3773,7 +3945,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3914,7 +4086,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3958,7 +4130,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4001,7 +4173,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4142,7 +4314,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4206,7 +4378,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },

View File

@@ -21,23 +21,13 @@
"links": [], "links": [],
"panels": [ "panels": [
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {},
"mappings": [ "unit": "percent",
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 100,
"min": 0, "min": 0,
"nullValueMode": "connected", "max": 100,
"thresholds": { "thresholds": {
"mode": "absolute", "mode": "absolute",
"steps": [ "steps": [
@@ -55,7 +45,16 @@
} }
] ]
}, },
"unit": "percent" "mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
}, },
"overrides": [] "overrides": []
}, },
@@ -68,25 +67,16 @@
"id": 2, "id": 2,
"links": [], "links": [],
"options": { "options": {
"orientation": "horizontal", "alertThreshold": true
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -134,12 +124,84 @@
"operator": "=", "operator": "=",
"value": "cpu-total" "value": "cpu-total"
} }
] ],
"alias": "Usage"
} }
], ],
"title": "{{ SERVERNAME }} - CPU", "title": "{{ SERVERNAME }} - CPU",
"type": "gauge" "type": "graph",
"cacheTimeout": null,
"renderer": "flot",
"yaxes": [
{
"label": null,
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "percent",
"$$hashKey": "object:395"
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:396"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"fillGradient": 0,
"linewidth": 1,
"dashes": false,
"hiddenSeries": false,
"dashLength": 10,
"spaceLength": 10,
"points": false,
"pointradius": 2,
"bars": false,
"stack": false,
"percentage": false,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"steppedLine": false,
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"timeFrom": null,
"timeShift": null,
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": []
}, },
{ {
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
@@ -284,7 +346,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -574,7 +636,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -714,7 +776,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -796,72 +858,58 @@
} }
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {}
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ ROOTFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ ROOTFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ ROOTFS * '.90'|float }}"
}
]
},
"unit": "bytes"
}, },
"overrides": [] "overrides": []
}, },
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 0, "x": 0,
"y": 5 "y": 5
}, },
"id": 12, "hiddenSeries": false,
"links": [], "id": 73,
"options": { "legend": {
"orientation": "horizontal", "avg": false,
"reduceOptions": { "current": false,
"calcs": [ "max": false,
"lastNotNull" "min": false,
], "show": false,
"fields": "", "total": false,
"values": false "values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -881,7 +929,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -906,76 +954,102 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/)", "title": "{{ SERVERNAME }} - Disk Used(/)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"cacheTimeout": null, "aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {}
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": "{{ NSMFS }}",
"min": 0,
"nullValueMode": "connected",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgba(50, 172, 45, 0.97)",
"value": null
},
{
"color": "rgba(237, 129, 40, 0.89)",
"value": "{{ NSMFS * '.80'|float }}"
},
{
"color": "rgba(245, 54, 54, 0.9)",
"value": "{{ NSMFS * '.90'|float }}"
}
]
},
"unit": "bytes"
}, },
"overrides": [] "overrides": []
}, },
"fill": 1,
"fillGradient": 0,
"gridPos": { "gridPos": {
"h": 5, "h": 5,
"w": 4, "w": 4,
"x": 4, "x": 4,
"y": 5 "y": 5
}, },
"id": 31, "hiddenSeries": false,
"links": [], "id": 74,
"options": { "legend": {
"orientation": "horizontal", "avg": false,
"reduceOptions": { "current": false,
"calcs": [ "max": false,
"lastNotNull" "min": false,
], "show": false,
"fields": "", "total": false,
"values": false "values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [ "targets": [
{ {
"dsType": "influxdb", "alias": "Used",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -995,7 +1069,7 @@
[ [
{ {
"params": [ "params": [
"used" "used_percent"
], ],
"type": "field" "type": "field"
}, },
@@ -1020,8 +1094,48 @@
] ]
} }
], ],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Disk Used(/nsm)", "title": "{{ SERVERNAME }} - Disk Used(/nsm)",
"type": "gauge" "tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:708",
"format": "percent",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:709",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}, },
{ {
"aliasColors": {}, "aliasColors": {},
@@ -1366,7 +1480,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1511,7 +1625,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1669,7 +1783,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1719,7 +1833,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1769,7 +1883,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1819,7 +1933,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1869,7 +1983,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -1919,7 +2033,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2067,7 +2181,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2131,7 +2245,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2285,7 +2399,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2424,7 +2538,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2621,7 +2735,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2665,7 +2779,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2709,7 +2823,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -2851,7 +2965,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3045,24 +3159,14 @@
"type": "stat" "type": "stat"
}, },
{ {
"cacheTimeout": null,
"datasource": "InfluxDB", "datasource": "InfluxDB",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"custom": {}, "custom": {},
"decimals": 2, "unit": "s",
"mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"max": 1209600,
"min": 0, "min": 0,
"nullValueMode": "connected", "max": null,
"decimals": 2,
"thresholds": { "thresholds": {
"mode": "absolute", "mode": "absolute",
"steps": [ "steps": [
@@ -3080,7 +3184,16 @@
} }
] ]
}, },
"unit": "s" "mappings": [
{
"id": 0,
"op": "=",
"text": "N/A",
"type": 1,
"value": "null"
}
],
"nullValueMode": "connected"
}, },
"overrides": [] "overrides": []
}, },
@@ -3093,25 +3206,16 @@
"id": 22, "id": 22,
"links": [], "links": [],
"options": { "options": {
"orientation": "horizontal", "alertThreshold": true
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
}, },
"pluginVersion": "7.0.5", "pluginVersion": "7.3.4",
"targets": [ "targets": [
{ {
"dsType": "influxdb", "dsType": "influxdb",
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3147,13 +3251,81 @@
"operator": "=", "operator": "=",
"value": "{{ SERVERNAME }}" "value": "{{ SERVERNAME }}"
} }
] ],
"alias": "Oldest Pcap"
} }
], ],
"title": "{{ SERVERNAME }} - PCAP Retention",
"type": "graph",
"renderer": "flot",
"yaxes": [
{
"label": "",
"show": true,
"logBase": 1,
"min": null,
"max": null,
"format": "s",
"$$hashKey": "object:643",
"decimals": 2
},
{
"label": null,
"show": false,
"logBase": 1,
"min": null,
"max": null,
"format": "short",
"$$hashKey": "object:644"
}
],
"xaxis": {
"show": true,
"mode": "time",
"name": null,
"values": [],
"buckets": null
},
"yaxis": {
"align": false,
"alignLevel": null
},
"lines": true,
"fill": 1,
"linewidth": 1,
"dashLength": 10,
"spaceLength": 10,
"pointradius": 2,
"legend": {
"show": false,
"values": false,
"min": false,
"max": false,
"current": false,
"total": false,
"avg": false
},
"nullPointMode": "connected",
"tooltip": {
"value_type": "individual",
"shared": true,
"sort": 0
},
"aliasColors": {},
"seriesOverrides": [],
"thresholds": [],
"timeRegions": [],
"cacheTimeout": null,
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "{{ SERVERNAME }} - PCAP Retention", "fillGradient": 0,
"type": "gauge" "dashes": false,
"hiddenSeries": false,
"points": false,
"bars": false,
"stack": false,
"percentage": false,
"steppedLine": false
}, },
{ {
"aliasColors": { "aliasColors": {
@@ -3215,7 +3387,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3259,7 +3431,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3302,7 +3474,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3648,7 +3820,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3794,7 +3966,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -3937,7 +4109,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -4550,7 +4722,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5172,7 +5344,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5220,7 +5392,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5378,7 +5550,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5483,7 +5655,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5912,7 +6084,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5955,7 +6127,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -5998,7 +6170,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },
@@ -6041,7 +6213,7 @@
"groupBy": [ "groupBy": [
{ {
"params": [ "params": [
"$Interval" "$__interval"
], ],
"type": "time" "type": "time"
}, },

View File

@@ -0,0 +1,26 @@
grafana:
config:
server:
root_url: "%(protocol)s://%(domain)s/grafana/"
auth.anonymous:
enabled: true
org_name: Main Org.
org_role: Viewer
smtp:
enabled: false
# host: localhost:25
# user: myuser
# If the password contains # or ; you have to wrap it with triple quotes wrapped by single quotes. Ex '"""#password;"""'
# password: mypassword
# cert_file: /etc/grafana/config/files/smtp_cert_file.crt
# key_file: /etc/grafana/config/files/smtp_key_file.key
# skip_verify: false
from_address: admin@grafana.localhost
from_name: Grafana
# ehlo_identity: dashboard.example.com
# auth.ldap:
# enabled: false
# config_file: /etc/grafana/config/files/ldap.toml
# allow_sign_up: true
# enterprise:
# license_path: /opt/so/conf/grafana/etc/files/license.jwt

View File

@@ -8,6 +8,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/manager path: /etc/grafana/grafana_dashboards/manager
- name: 'Manager Search' - name: 'Manager Search'
@@ -15,6 +16,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/managersearch path: /etc/grafana/grafana_dashboards/managersearch
- name: 'Sensor Nodes' - name: 'Sensor Nodes'
@@ -22,6 +24,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/sensor_nodes path: /etc/grafana/grafana_dashboards/sensor_nodes
- name: 'Search Nodes' - name: 'Search Nodes'
@@ -29,6 +32,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/search_nodes path: /etc/grafana/grafana_dashboards/search_nodes
- name: 'Standalone' - name: 'Standalone'
@@ -36,6 +40,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/standalone path: /etc/grafana/grafana_dashboards/standalone
{%- else %} {%- else %}
@@ -44,6 +49,7 @@ providers:
type: file type: file
disableDeletion: false disableDeletion: false
editable: true editable: true
allowUiUpdates: true
options: options:
path: /etc/grafana/grafana_dashboards/eval path: /etc/grafana/grafana_dashboards/eval
{% endif %} {% endif %}

View File

@@ -0,0 +1 @@
For files that are referenced inside the Grafana config, place them in /opt/so/saltstack/local/salt/grafana/etc/files/. This would include keys used for smtp or a Grafana enterprise license file.
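For example, the so-grafana container mounts that directory at /etc/grafana/config/files (see the bind mounts in the Grafana state below), so the pillar should reference the in-container paths. A hedged sketch reusing the placeholder filenames from the defaults above:

```
grafana:
  config:
    smtp:
      enabled: true
      cert_file: /etc/grafana/config/files/smtp_cert_file.crt
      key_file: /etc/grafana/config/files/smtp_key_file.key
```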

View File

@@ -1,482 +0,0 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
;data = /var/lib/grafana
# Temporary files in `data` directory older than given duration will be removed
;temp_data_lifetime = 24h
# Directory where grafana can store logs
;logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
;provisioning = conf/provisioning
#################################### Server ####################################
[server]
# Protocol (http, https, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
root_url = %(protocol)s://%(domain)s/grafana/
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
log_queries =
#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file
# Provider config options
# memory: does not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions
# Session cookie name
;cookie_name = grafana_sess
# If you use session in https only, default is false
;cookie_secure = false
# Session life time, default is 86400
;session_life_time = 86400
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Background text for the user field on the login page
;login_hint = email or username
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
enabled = true
# specify organization name that should be used for unauthenticated users
org_name = Main Org.
# specify role for unauthenticated users
org_role = Viewer
#################################### Github Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;team_ids =
;allowed_organizations =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;team_ids =
;allowed_organizations =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;ldap_sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
[emails]
;welcome_email_on_sign_up = false
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
# Max line number of single file, default is 1000000
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
;error_or_timeout = alerting
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = false
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Publish interval
;interval_seconds = 10
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
#################################### Grafana.com integration ##########################
# Url used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =
[external_image_storage.s3]
;bucket =
;region =
;path =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =
;path =
[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =
[external_image_storage.local]
# does not require any configuration
[rendering]
# Options to configure external image rendering server like https://github.com/grafana/grafana-image-renderer
;server_url =
;callback_url =
[enterprise]
# Path to a valid Grafana Enterprise license.jwt file
;license_path =

View File

@@ -0,0 +1,12 @@
{%- macro write_config_line(cfg) %}
{%- for k,v in cfg.items() -%}
{{ k }} = {{ v }}
{% endfor %}
{%- endmacro %}
{{ write_config_line(config.get("default", {})) }}
{% for header, cfg in config.items() %}
{%- if header == "default" %}{% continue %}{% endif %}
[{{ header }}]
{{ write_config_line(cfg) }}
{% endfor %}
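To illustrate the macro: keys under the top-level "default" section are emitted without a section header, and every other top-level key becomes an INI section. A hypothetical context of default: {app_mode: production} and server: {http_port: 3000} would render as:

```
app_mode = production

[server]
http_port = 3000
```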

View File

@@ -9,6 +9,10 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %} {% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %}
{% import_yaml 'grafana/defaults.yaml' as default_settings %}
{% set GRAFANA_SETTINGS = salt['grains.filter_by'](default_settings, default='grafana', merge=salt['pillar.get']('grafana', {})) %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Grafana all the things # Grafana all the things
@@ -75,13 +79,44 @@ grafanadashsndir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
grafanaconf: grafana-dashboard-config:
file.recurse: file.managed:
- name: /opt/so/conf/grafana/etc - name: /opt/so/conf/grafana/etc/dashboards/dashboard.yml
- user: 939 - user: 939
- group: 939 - group: 939
- template: jinja - template: jinja
- source: salt://grafana/etc - source: salt://grafana/etc/dashboards/dashboard.yml
- makedirs: True
grafana-datasources-config:
file.managed:
- name: /opt/so/conf/grafana/etc/datasources/influxdb.yaml
- user: 939
- group: 939
- template: jinja
- source: salt://grafana/etc/datasources/influxdb.yaml
- makedirs: True
grafana-config:
file.managed:
- name: /opt/so/conf/grafana/etc/grafana.ini
- user: 939
- group: 939
- template: jinja
- source: salt://grafana/etc/grafana.ini.jinja
- context:
config: {{ GRAFANA_SETTINGS.config|json }}
# these are the files that are referenced inside the config such as smtp:cert_file, smtp:key_file, auth.ldap:config_file, enterprise:license_path
grafana-config-files:
file.recurse:
- name: /opt/so/conf/grafana/etc/files
- user: 939
- group: 939
- source: salt://grafana/etc/files
- makedirs: True
{% if salt['pillar.get']('managertab', False) %} {% if salt['pillar.get']('managertab', False) %}
{% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %} {% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %}
@@ -229,6 +264,7 @@ so-grafana:
- /opt/so/conf/grafana/etc/datasources:/etc/grafana/provisioning/datasources:rw - /opt/so/conf/grafana/etc/datasources:/etc/grafana/provisioning/datasources:rw
- /opt/so/conf/grafana/etc/dashboards:/etc/grafana/provisioning/dashboards:rw - /opt/so/conf/grafana/etc/dashboards:/etc/grafana/provisioning/dashboards:rw
- /opt/so/conf/grafana/grafana_dashboards:/etc/grafana/grafana_dashboards:rw - /opt/so/conf/grafana/grafana_dashboards:/etc/grafana/grafana_dashboards:rw
- /opt/so/conf/grafana/etc/files:/etc/grafana/config/files:ro
- environment: - environment:
- GF_SECURITY_ADMIN_PASSWORD={{ ADMINPASS }} - GF_SECURITY_ADMIN_PASSWORD={{ ADMINPASS }}
- port_bindings: - port_bindings:

View File

@@ -91,7 +91,7 @@ append_so-aptcacherng_so-status.conf:
strelka_yara_update: strelka_yara_update:
cron.present: cron.present:
- user: root - user: root
- name: '/usr/sbin/so-yara-update > /dev/null 2>&1' - name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1'
- hour: '7' - hour: '7'
- minute: '1' - minute: '1'
{% else %} {% else %}

View File

@@ -98,7 +98,7 @@ http {
{%- if role == 'fleet' %} {%- if role == 'fleet' %}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
server_name {{ url_base }}; server_name {{ main_ip }};
root /opt/socore/html; root /opt/socore/html;
index index.html; index index.html;

View File

@@ -1,23 +0,0 @@
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
{%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"info",
"agent": {
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,
"modules": {
"importer": {},
"statickeyauth": {
"apiKey": "{{ SENSORONIKEY }}"
},
"stenoquery": {
"executablePath": "/opt/sensoroni/scripts/stenoquery.sh",
"pcapInputPath": "/nsm/pcap",
"pcapOutputPath": "/nsm/pcapout"
}
}
}
}

View File

@@ -45,13 +45,6 @@ stenoconfdir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
sensoroniconfdir:
file.directory:
- name: /opt/so/conf/sensoroni
- user: 939
- group: 939
- makedirs: True
{% if BPF_STENO %} {% if BPF_STENO %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', INTERFACE + ' ' + BPF_STENO|join(" "),cwd='/root') %} {% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', INTERFACE + ' ' + BPF_STENO|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %} {% if BPF_CALC['stderr'] == "" %}
@@ -77,15 +70,6 @@ stenoconf:
- defaults: - defaults:
BPF_COMPILED: "{{ BPF_COMPILED }}" BPF_COMPILED: "{{ BPF_COMPILED }}"
sensoroniagentconf:
file.managed:
- name: /opt/so/conf/sensoroni/sensoroni.json
- source: salt://pcap/files/sensoroni.json
- user: 939
- group: 939
- mode: 600
- template: jinja
stenoca: stenoca:
file.directory: file.directory:
- name: /opt/so/conf/steno/certs - name: /opt/so/conf/steno/certs
@@ -127,13 +111,6 @@ stenolog:
- group: 941 - group: 941
- makedirs: True - makedirs: True
sensoronilog:
file.directory:
- name: /opt/so/log/sensoroni
- user: 939
- group: 939
- makedirs: True
so-steno: so-steno:
docker_container.{{ STENOOPTIONS.status }}: docker_container.{{ STENOOPTIONS.status }}:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }} - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}
@@ -156,38 +133,20 @@ append_so-steno_so-status.conf:
file.append: file.append:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-steno - text: so-steno
- unless: grep so-steno /opt/so/conf/so-status/so-status.conf - unless: grep -q so-steno /opt/so/conf/so-status/so-status.conf
{% if STENOOPTIONS.status == 'running' %}
delete_so-steno_so-status.disabled: {% if not STENOOPTIONS.start %}
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-steno$
{% elif STENOOPTIONS.status == 'stopped' %}
so-steno_so-status.disabled: so-steno_so-status.disabled:
file.comment: file.comment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-steno$ - regex: ^so-steno$
{% endif %} {% else %}
delete_so-steno_so-status.disabled:
so-sensoroni: file.uncomment:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}
- network_mode: host
- binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
- /nsm/pcap:/nsm/pcap:rw
- /nsm/import:/nsm/import:rw
- /nsm/pcapout:/nsm/pcapout:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
- watch:
- file: /opt/so/conf/sensoroni/sensoroni.json
append_so-sensoroni_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-sensoroni - regex: ^so-steno$
{% endif %}
{% else %} {% else %}

View File

@@ -8,12 +8,12 @@ include:
wait_for_playbook: wait_for_playbook:
cmd.run: cmd.run:
- name: until nc -z {{ MAINIP }} 3200; do sleep 1; done - name: until nc -z {{ MAINIP }} 3200; do sleep 1; done
- timeout: 30 - timeout: 300
- onchanges:
- cmd: create_user
create_user: create_user:
cmd.script: cmd.script:
- source: salt://playbook/files/automation_user_create.sh - source: salt://playbook/files/automation_user_create.sh
- cwd: /root - cwd: /root
- template: jinja - template: jinja
- onchanges:
- cmd: wait_for_playbook

View File

@@ -2,6 +2,8 @@
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%} # {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%}
# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %} # {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %}
set -e
local_salt_dir=/opt/so/saltstack/local local_salt_dir=/opt/so/saltstack/local
try_count=6 try_count=6
@@ -44,7 +46,11 @@ while [[ $try_count -le 6 ]]; do
echo " api_key: ${automation_api_key}" echo " api_key: ${automation_api_key}"
} >> $local_salt_dir/pillar/global.sls } >> $local_salt_dir/pillar/global.sls
fi fi
exit 0
fi fi
((try_count++)) ((try_count++))
sleep "${interval}s" sleep "${interval}s"
done done
# Timeout exceeded, exit with non-zero exit code
exit 1

View File

@@ -1,11 +1,12 @@
#!/bin/bash #!/bin/bash
# {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} # {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %} # {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %}
. /usr/sbin/so-common
default_salt_dir=/opt/so/saltstack/default default_salt_dir=/opt/so/saltstack/default
# Generate salt + hash for admin user # Generate salt + hash for admin user
admin_salt=$(tr -dc "a-zA-Z0-9" < /dev/urandom | fold -w 32 | head -n 1) admin_salt=$(get_random_value 32)
admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}') admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}')
admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}') admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}')
sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql

View File

@@ -17,7 +17,6 @@ def run():
if ACTION == 'enablefleet': if ACTION == 'enablefleet':
logging.info('so/fleet enablefleet reactor') logging.info('so/fleet enablefleet reactor')
ESECRET = data['data']['enroll-secret']
MAINIP = data['data']['mainip'] MAINIP = data['data']['mainip']
ROLE = data['data']['role'] ROLE = data['data']['role']
HOSTNAME = data['data']['hostname'] HOSTNAME = data['data']['hostname']
@@ -30,12 +29,6 @@ def run():
line = re.sub(r'fleet_manager: \S*', f"fleet_manager: True", line.rstrip()) line = re.sub(r'fleet_manager: \S*', f"fleet_manager: True", line.rstrip())
print(line) print(line)
# Update the enroll secret in the secrets pillar
if ESECRET != "":
for line in fileinput.input(SECRETSFILE, inplace=True):
line = re.sub(r'fleet_enroll-secret: \S*', f"fleet_enroll-secret: {ESECRET}", line.rstrip())
print(line)
# Update the Fleet host in the static pillar # Update the Fleet host in the static pillar
for line in fileinput.input(STATICFILE, inplace=True): for line in fileinput.input(STATICFILE, inplace=True):
line = re.sub(r'fleet_hostname: \S*', f"fleet_hostname: '{HOSTNAME}'", line.rstrip()) line = re.sub(r'fleet_hostname: \S*', f"fleet_hostname: '{HOSTNAME}'", line.rstrip())
@@ -46,6 +39,18 @@ def run():
line = re.sub(r'fleet_ip: \S*', f"fleet_ip: '{MAINIP}'", line.rstrip()) line = re.sub(r'fleet_ip: \S*', f"fleet_ip: '{MAINIP}'", line.rstrip())
print(line) print(line)
if ACTION == 'update-enrollsecret':
logging.info('so/fleet update-enrollsecret reactor')
ESECRET = data['data']['enroll-secret']
# Update the enroll secret in the secrets pillar
if ESECRET != "":
for line in fileinput.input(SECRETSFILE, inplace=True):
line = re.sub(r'fleet_enroll-secret: \S*', f"fleet_enroll-secret: {ESECRET}", line.rstrip())
print(line)
if ACTION == 'genpackages': if ACTION == 'genpackages':
logging.info('so/fleet genpackages reactor') logging.info('so/fleet genpackages reactor')

View File

@@ -0,0 +1,39 @@
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description') %}
{%- set ADDRESS = salt['pillar.get']('sensoroni:node_address') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
{%- set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) %}
{%- set ROLE = grains.id.split('_') | last %}
{%- if ROLE in ['eval', 'standalone', 'sensor', 'heavynode'] %}
{%- set STENODEFAULT = True %}
{%- else %}
{%- set STENODEFAULT = False %}
{%- endif %}
{%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"info",
"agent": {
"role": "{{ grains.role }}",
"description": "{{ DESCRIPTION }}",
"address": "{{ ADDRESS }}",
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,
"modules": {
"importer": {},
"statickeyauth": {
"apiKey": "{{ SENSORONIKEY }}"
{%- if STENOENABLED %}
},
"stenoquery": {
"executablePath": "/opt/sensoroni/scripts/stenoquery.sh",
"pcapInputPath": "/nsm/pcap",
"pcapOutputPath": "/nsm/pcapout"
}
{%- else %}
}
{%- endif %}
}
}
}
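As noted in the release notes below, the stenoquery module (and Stenographer itself, per the pcap state earlier) can be disabled by setting the steno pillar in a node's minion.sls, or grid-wide in global.sls:

```
steno:
  enabled: false
```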

salt/sensoroni/init.sls Normal file
View File

@@ -0,0 +1,45 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
sensoroniconfdir:
file.directory:
- name: /opt/so/conf/sensoroni
- user: 939
- group: 939
- makedirs: True
sensoroniagentconf:
file.managed:
- name: /opt/so/conf/sensoroni/sensoroni.json
- source: salt://sensoroni/files/sensoroni.json
- user: 939
- group: 939
- mode: 600
- template: jinja
sensoronilog:
file.directory:
- name: /opt/so/log/sensoroni
- user: 939
- group: 939
- makedirs: True
so-sensoroni:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}
- network_mode: host
- binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
- /nsm/pcap:/nsm/pcap:rw
- /nsm/import:/nsm/import:rw
- /nsm/pcapout:/nsm/pcapout:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
- watch:
- file: /opt/so/conf/sensoroni/sensoroni.json
append_so-sensoroni_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-sensoroni

View File

@@ -1,6 +1,29 @@
[ [
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" }, { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, "links": [
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } ]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:log.id.uid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:log.id.uid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.uid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.uid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:network.community_id}\" | groupby event.module event.dataset"
]},
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "target": "",
"links": [
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"
]},
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "target": "_blank",
"links": [
"https://www.virustotal.com/gui/search/{value}"
]}
] ]
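The move from a single "link" to a "links" array gives each action an ordered list of candidate URLs. Judging from the correlate entry above, links that reference a field with the {:field} syntax appear to be usable only when that field is present on the event, so the list acts as a fallback chain from the most specific query to the least. A hypothetical custom action in the same format (the portal URL is a placeholder):

```
{ "name": "actionCustom", "description": "Search an internal portal", "icon": "fa-search", "target": "_blank",
  "links": [
    "https://portal.example.com/search?q={value}"
  ]}
```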

View File

@@ -1,42 +1,54 @@
{ {
"title": "Security Onion 2.3.10 is here!", "title": "Security Onion 2.3.20 is here!",
"changes": [ "changes": [
{ "summary": "UEFI installs with multiple disks should work as intended now." }, { "summary": "soup has been refactored. You will need to run it a few times to get all the changes properly. We are working on making this even easier for future releases."},
{ "summary": "Telegraf scripts will now make sure they are not already running before execution." }, { "summary": "soup now has awareness of Elastic Features and now downloads the appropriate Docker containers."},
{ "summary": "You are now prompted during setup if you want to change the docker IP range. If you change this it needs to be the same on all nodes in the grid." }, { "summary": "The Sensors interface has been renamed to Grid. This interface now includes all Security Onion nodes."},
{ "summary": "Soup will now download the new containers before stopping anything. If anything fails it will now exit and leave the grid at the current version." }, { "summary": "Grid interface now includes the status of the node. The status currently shows either Online (blue) or Offline (orange). If a node does not check-in on time then it will be marked as Offline."},
{ "summary": "All containers are now hosted on quay.io to prevent pull limitations. We are now using GPG keys to determine if the image is from Security Onion." }, { "summary": "Grid interface now includes the IP and Role of each node in the grid."},
{ "summary": "Osquery installers have been updated to osquery 4.5.1." }, { "summary": "Grid interface includes a new Filter search input to filter the visible list of grid nodes to a desired subset. As an example, typing in “sensor” will hide all nodes except those that behave as a sensor."},
{ "summary": "Fix for bug where Playbook was not removing the Elastalert rules for inactive Plays." }, { "summary": "The Grid description field can now be customized via the local minion pillar file for each node."},
{ "summary": "Exifdata reported by Strelka is now constrained to a single multi-valued field to prevent mapping explosion (scan.exiftool)." }, { "summary": "SOC will now draw attention to an unhealthy situation within the grid or with the connection between the users browser and the manager node. For example, when the Grid has at least one Offline node the SOC interface will show an exclamation mark in front of the browser tabs title and an exclamation mark next to the Grid menu option in SOC. Additionally, the favicon will show an orange marker in the top-right corner (dynamic favicons not supported in Safari). Additionally, if the users web browser is unable to communicate with the manager the unhealth indicators appear along with a message at the top of SOC that states there is a connection problem."},
{ "summary": "Resolved issue with Navigator layer(s) not loading correctly." }, { "summary": "Docker has been upgraded to the latest version."},
{ "summary": "Wazuh authd is now started by default on port 1515/tcp." }, { "summary": "Docker should be more reliable now as Salt is now managing daemon.json."},
{ "summary": "Wazuh API default credentials are now removed after setup. Scripts have been added for API user management." }, { "summary": "You can now install Elastic in a traditional cluster. When setting up the manager select Advanced and follow the prompts. Replicas are controlled in global.sls."},
{ "summary": "Upgraded Salt to 3002.2 due to CVEs." }, { "summary": "You can now use Hot and Warm routing with Elastic in a traditional cluster. You can change the box.type in the minions sls file. You will need to create a curator job to re-tag the indexes based on your criteria."},
{ "summary": "If salt-minion is unable to apply states after the defined threshold, we assume salt-minion is in a bad state and the salt-minion service will be restarted." }, { "summary": "Telegraf has been updated to version 1.16.3."},
{ "summary": "Fixed bug that prevented mysql from installing for Fleet if Playbook wasn't also installed." }, { "summary": "Grafana has been updated to 7.3.4 to resolve some XSS vulnerabilities."},
{ "summary": "<code>so-status</code> will now show STARTING or WAIT_START, instead of ERROR, if <code>so-status</code> is run before a salt highstate has started or finished for the first time after system startup" }, { "summary": "Grafana graphs have been changed to graphs vs guages so alerting can be set up."},
{ "summary": "Stenographer can now be disabled on a sensor node by setting the pillar steno:enabled:false in it's minion.sls file or globally if set in the global.sls file" }, { "summary": "Grafana is now completely pillarized, allowing users to customize alerts and making it customizable for email, Slack, etc. See the docs <a href=\"https://securityonion.net/docs/grafana\">here</a>."},
{ "summary": "Added <code>so-ssh-harden</code> script that runs the commands listed in <a href='https://docs.securityonion.net/en/2.3/ssh.html' target='so-help'>https://docs.securityonion.net/en/2.3/ssh.html</a>" }, { "summary": "Yara rules now should properly install on non-airgap installs. Previously, users had to wait for an automated job to place them in the correct location."},
{ "summary": "NGINX now redirects the browser to the hostname/IP address/FQDN based on global:url_base" }, { "summary": "Strelka backend will not stop itself any more. Previously, its behavior was to shut itself down after fifteen minutes and wait for Salt to restart it to look for work before shutting down again."},
{ "summary": "MySQL state now waits for MySQL server to respond to a query before completeing" }, { "summary": "Strelka daily rule updates are now logged to <code>/nsm/strelka/log/yara-update.log</code>"},
{ "summary": "Added Analyst option to network installs" }, { "summary": "Several changes to the setup script to improve install reliability."},
{ "summary": "Acknowledging (and Escalating) alerts did not consistently remove the alert from the visible list; this has been corrected." }, { "summary": "Airgap now supports the import node type."},
{ "summary": "Escalating alerts that have a <i>rule.case_template</i> field defined will automatically assign that case template to the case generated in TheHive." }, { "summary": "Custom Zeek file extraction values in the pillar now work properly."},
{ "summary": "Alerts and Hunt interface quick action bar has been converted into a vertical menu to improve quick action option clarity. Related changes also eliminated the issues that occurred when the quick action bar was appearing to the left of the visible browser area." }, { "summary": "TheHive has been updated to support Elastic 7."},
{ "summary": "Updated Go to newer version to fix a timezone, daylight savings time (DST) issue that resulted in Alerts and Hunt interfaces not consistently showing results." }, { "summary": "Cortex image now includes whois package to correct an issue with the CERTatPassiveDNS analyzer."},
{ "summary": "Improved Hunt and Alert table sorting." }, { "summary": "Hunt and Alert quick action menu has been refactored into submenus."},
{ "summary": "Alerts interface now allows absolute time searches." }, { "summary": "New clipboard quick actions now allow for copying fields or entire events to the clipboard."},
{ "summary": "Alerts interface 'Hunt' quick action is now working as intended." }, { "summary": "PCAP Add Job form now retains previous job details for quickly adding additional jobs. A new Clear button now exists at the bottom of this form to clear out these fields and forget the previous job details."},
{ "summary": "Alerts interface 'Ack' icon tooltip has been changed from 'Dismiss' to 'Acknowledge' for consistency." }, { "summary": "PCAP Add Job form now allows users to perform arbitrary PCAP lookups of imported PCAP data (data imported via the <code>so-import-pcap</code> script)."},
{ "summary": "Hunt interface bar charts will now show the quick action menu when clicked instead of assuming the click was intended to add an include filter." }, { "summary": "Downloads page now allows direct download of Wazuh agents for Linux, Mac, and Windows from the manager, and shows the version of Wazuh and Elastic installed with Security Onion."},
{ "summary": "Hunt interface quick action will now cast a wider net on field searches." }, { "summary": "PCAP job interface now shows additional job filter criteria when expanding the job filter details."},
{ "summary": "Now explicitly preventing the use of a dollar sign ($) character in web user passwords during setup." }, { "summary": "Upgraded authentication backend to Kratos 0.5.5."},
{ "summary": "Cortex container will now restart properly if the SO host was not gracefully shutdown." }, { "summary": "SOC tables with the “Rows per Page” dropdown no longer show truncated page counts."},
{ "summary": "Added syslog plugin to the logstash container; this is not in-use by default but available for those users that choose to use it." }, { "summary": "Several Hunt errors are now more descriptive, particularly those around malformed queries."},
{ "summary": "Winlogbeat download package is now available from the SOC Downloads interface." }, { "summary": "SOC Error banner has been improved to avoid showing raw HTML syntax, making connection and server-side errors more readable."},
{ "summary": "Upgraded Kratos authentication system." }, { "summary": "Hunt and Alerts interfaces will now allow pivoting to PCAP from a group of results if the grouped results contain a network.community_id field."},
{ "summary": "Added new Reset Defaults button to the SOC Profile Settings interface which allows users to reset all local browser SOC customizations back to their defaults. This includes things like default sort column, sort order, items per page, etc." }, { "summary": "New “Correlate” quick action will pivot to a new Hunt search for all events that can be correlated by at least one of various event IDs."},
{ "summary": "Known Issues <ul><li>Following the Salt minion upgrade on remote nodes, the salt-minion service may not restart properly. If this occurs, you can ssh to the minion and run <code>sudo systemctl restart salt-minion</code>. If you do not want to connect to each node and manually restart the salt-minion, the new salt-minion watch process will restart it automatically after 1 hour.</li><li>During soup, you may see the following during the first highstate run, it can be ignored: <code>Rendering SLS '<some_sls_name_here>' failed: Jinja variable 'list object' has no attribute 'values'</code>. The second highstate will complete without that error.</li></ul>" } { "summary": "Fixed bug that caused some Hunt queries to not group correctly without a .keyword suffix. This has been corrected so that the .keyword suffix is no longer necessary on those groupby terms."},
{ "summary": "Fixed issue where PCAP interface loses formatting and color coding when opening multiple PCAP tabs."},
{ "summary": "Alerts interface now has a Refresh button that allows users to refresh the current alerts view without refreshing the entire SOC application."},
{ "summary": "Hunt and Alerts interfaces now have an auto-refresh dropdown that will automatically refresh the current view at the selected frequency."},
{ "summary": "The <code>so-elastalert-test</code> script has been refactored to work with Security Onion 2.3."},
{ "summary": "The included Logstash image now includes Kafka plugins."},
{ "summary": "Wazuh agent registration process has been improved to support slower hardware and networks."},
{ "summary": "An Elasticsearch ingest pipeline has been added for suricata.ftp_data."},
{ "summary": "Elasticsearchs indices.query.bool.max_clause_count value has been increased to accommodate a slightly larger number of fields (1024 -> 1500) when querying using a wildcard."},
{ "summary": "On nodes being added to an existing grid, setup will compare the version currently being installed to the manager (>=2.3.20), pull the correct Security Onion version from the manager if there is a mismatch, and run that version."},
{ "summary": "Setup will gather any errors found during a failed install into <code>/root/errors.log</code> for easy copy/paste and debugging."},
{ "summary": "Selecting Suricata as the metadata engine no longer results in the install failing."},
{ "summary": "<code>so-rule-update</code> now accepts arguments to idstools. For example, <code>so-rule-update -f</code> will force idstools to pull rules, ignoring the default 15-minute pull limit."}
] ]
} }
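
Two of the items above translate directly into commands. A quick usage sketch, with the flag and log path taken verbatim from the notes (these are typically run as root on the relevant node):

```
# Force idstools to pull rules immediately, ignoring the default
# 15-minute pull limit (per the so-rule-update note above):
so-rule-update -f

# Review the daily Strelka Yara rule update log (path per the note above):
tail -n 20 /nsm/strelka/log/yara-update.log
```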

View File

@@ -1,6 +1,29 @@
[ [
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" }, { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, "links": [
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } ]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:log.id.uid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:log.id.uid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.uid}\" OR \"{:network.community_id}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.fuid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:log.id.uid}\" | groupby event.module event.dataset",
"/#/hunt?q=\"{:network.community_id}\" | groupby event.module event.dataset"
]},
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "target": "",
"links": [
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"
]},
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "target": "_blank",
"links": [
"https://www.virustotal.com/gui/search/{value}"
]}
] ]
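
The quick actions above now take an ordered list of links, and the Correlate entry falls back through progressively smaller field combinations. A hedged sketch of that apparent semantics follows; the sample event is fabricated, and the "first link whose {:field} placeholders all resolve wins" behavior is an inference from the ordering above, not something this diff confirms:

```
# Resolve {:field} placeholders against an event and pick the first
# fully-resolvable link, mirroring the Correlate fallback list above.
event='{"network":{"community_id":"1:abc"},"log":{"id":{"uid":"Cxyz"}}}'
links=(
  '/#/hunt?q="{:log.id.uid}" OR "{:network.community_id}" | groupby event.module event.dataset'
  '/#/hunt?q="{:network.community_id}" | groupby event.module event.dataset'
)
for link in "${links[@]}"; do
  ok=true
  resolved=$link
  while [[ $resolved =~ \{:([A-Za-z0-9_.]+)\} ]]; do
    field=${BASH_REMATCH[1]}
    # jq -e exits non-zero when the field is missing or null
    if value=$(echo "$event" | jq -er ".${field}"); then
      resolved=${resolved//"{:${field}}"/$value}
    else
      ok=false
      break
    fi
  done
  if $ok; then
    echo "$resolved"
    break
  fi
done
```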

View File

@@ -43,6 +43,10 @@
"password": "", "password": "",
"verifyCert": false "verifyCert": false
}, },
"sostatus": {
"refreshIntervalMs": 30000,
"offlineThresholdMs": 60000
},
{% if THEHIVEKEY != '' %} {% if THEHIVEKEY != '' %}
"thehive": { "thehive": {
"hostUrl": "http://{{ MANAGERIP }}:9000/thehive", "hostUrl": "http://{{ MANAGERIP }}:9000/thehive",

View File

@@ -12,7 +12,7 @@
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} {% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %} {% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import', 'helixsensor'] %}
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %} {% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
{% set ca_server = grains.id %} {% set ca_server = grains.id %}
{% else %} {% else %}

View File

@@ -6,8 +6,8 @@
{%- endif -%} {%- endif -%}
logging_cfg: '/etc/strelka/logging.yaml' logging_cfg: '/etc/strelka/logging.yaml'
limits: limits:
max_files: 5000 max_files: 0
time_to_live: 900 time_to_live: 0
max_depth: 15 max_depth: 15
distribution: 600 distribution: 600
scanner: 150 scanner: 150
@@ -215,14 +215,6 @@ scanners:
priority: 5 priority: 5
options: options:
tmp_directory: '/dev/shm/' tmp_directory: '/dev/shm/'
'ScanMmbot':
- positive:
flavors:
- 'vb_file'
- 'vbscript'
priority: 5
options:
server: 'strelka_mmrpc_1:33907'
'ScanOcr': 'ScanOcr':
- positive: - positive:
flavors: flavors:
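
With max_files and time_to_live zeroed above, the Strelka backend no longer exits after 5000 files or 900 seconds and waits to be restarted. A quick way to confirm the Strelka containers stay up; the so-strelka name prefix is taken from the so-status entries elsewhere in this diff:

```
docker ps --format '{{.Names}}\t{{.Status}}' | grep so-strelka
```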

View File

@@ -16,7 +16,7 @@ throughput:
delay: 0s delay: 0s
files: files:
patterns: patterns:
- '/nsm/strelka/*' - '/nsm/strelka/unprocessed/*'
delete: false delete: false
gatekeeper: true gatekeeper: true
response: response:

View File

@@ -72,13 +72,20 @@ strelkalogdir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
strelkastagedir: strelkaprocessed:
file.directory: file.directory:
- name: /nsm/strelka/processed - name: /nsm/strelka/processed
- user: 939 - user: 939
- group: 939 - group: 939
- makedirs: True - makedirs: True
strelkaunprocessed:
file.directory:
- name: /nsm/strelka/unprocessed
- user: 939
- group: 939
- makedirs: True
strelka_coordinator: strelka_coordinator:
docker_container.running: docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }} - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }}
@@ -163,11 +170,18 @@ append_so-strelka-filestream_so-status.conf:
file.append: file.append:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-strelka-filestream - text: so-strelka-filestream
strelka_zeek_extracted_sync_old:
cron.absent:
- user: root
- name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1'
- minute: '*'
strelka_zeek_extracted_sync: strelka_zeek_extracted_sync:
cron.present: cron.present:
- user: root - user: root
- name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1' - identifier: zeek-extracted-strelka-sync
- name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/unprocessed/ > /dev/null 2>&1'
- minute: '*' - minute: '*'
{% else %} {% else %}
@@ -176,4 +190,4 @@ strelka_state_not_allowed:
test.fail_without_changes: test.fail_without_changes:
- name: strelka_state_not_allowed - name: strelka_state_not_allowed
{% endif %} {% endif %}
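
After these changes, the extraction flow is: Zeek writes completed files to /nsm/zeek/extracted/complete/, the root cron job moves them into /nsm/strelka/unprocessed/ every minute, and filestream watches only that directory. To confirm the identifier-tagged cron entry is in place (Salt writes the identifier as a comment line above the job):

```
crontab -l -u root | grep -A1 zeek-extracted-strelka-sync
```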

View File

@@ -167,6 +167,14 @@ append_so-suricata_so-status.conf:
file.append: file.append:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-suricata - text: so-suricata
- unless: grep -q so-suricata /opt/so/conf/so-status/so-status.conf
{% if grains.role == 'so-import' %}
disable_so-suricata_so-status.conf:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-suricata$
{% endif %}
surilogrotate: surilogrotate:
file.managed: file.managed:
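
On import nodes, the state above comments out the so-status entry rather than removing it, so so-status skips Suricata; the same pattern is applied to Zeek later in this diff. The effect can be checked directly (the line number in the expected output is illustrative):

```
grep -n 'so-suricata' /opt/so/conf/so-status/so-status.conf
# on an import node, expect something like:  7:#so-suricata
```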

View File

@@ -20,7 +20,7 @@ HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]"
'*_eval': { '*_eval': {
'default-packet-size': salt['pillar.get']('sensor:mtu', 1500) + hardware_header, 'default-packet-size': salt['pillar.get']('sensor:mtu', 1500) + hardware_header,
}, },
'*_helix': { '*_helixsensor': {
'default-packet-size': salt['pillar.get']('sensor:mtu', 9000) + hardware_header, 'default-packet-size': salt['pillar.get']('sensor:mtu', 9000) + hardware_header,
}, },
'*': { '*': {

View File

@@ -48,6 +48,7 @@ so-telegraf:
- HOST_ETC=/host/etc - HOST_ETC=/host/etc
- HOST_SYS=/host/sys - HOST_SYS=/host/sys
- HOST_MOUNT_PREFIX=/host - HOST_MOUNT_PREFIX=/host
- GODEBUG=x509ignoreCN=0
- network_mode: host - network_mode: host
- binds: - binds:
- /opt/so/log/telegraf:/var/log/telegraf:rw - /opt/so/log/telegraf:/var/log/telegraf:rw
@@ -84,4 +85,4 @@ telegraf_state_not_allowed:
test.fail_without_changes: test.fail_without_changes:
- name: telegraf_state_not_allowed - name: telegraf_state_not_allowed
{% endif %} {% endif %}

View File

@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=stenoloss APP=stenoloss
lf=/tmp/$APP-pidLockFile lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists # create empty lock file if none exists
@@ -25,7 +24,22 @@ read lastPID < $lf
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit [ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf echo $$ > $lf
# Get the data TSFILE=/var/log/telegraf/laststenodrop.log
DROP=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop | awk '{print $14}' | awk -F "=" '{print $2}') if [ -f "$TSFILE" ]; then
LASTTS=$(cat $TSFILE)
else
LASTTS=0
fi
echo "stenodrop drop=$DROP" # Get the data
LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
if [[ "$CURRENTTS" != "$LASTTS" ]]; then
DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
echo $CURRENTTS > $TSFILE
else
DROP=0
fi
echo "stenodrop drop=$DROP"

View File

@@ -44,7 +44,7 @@ if [ $CHECKIT == 2 ]; then
TOTALPAST=$(($PASTPACKETS + $PASTDROP)) TOTALPAST=$(($PASTPACKETS + $PASTDROP))
TOTAL=$(($TOTALCURRENT - $TOTALPAST)) TOTAL=$(($TOTALCURRENT - $TOTALPAST))
LOSS=$(echo $DROPPED $TOTAL / p | dc) LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "suridrop drop=$LOSS" echo "suridrop drop=$LOSS"
fi fi
else else
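
The added `4 k` sets dc's precision to four decimal places; without it, dc performs integer division and small loss ratios silently truncate to zero:

```
echo '5 1000 / p' | dc       # prints 0 (default precision is 0)
echo '4 k 5 1000 / p' | dc   # prints .0050
```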

View File

@@ -1,9 +1,11 @@
cluster.name: "thehive" cluster.name: thehive
network.host: 0.0.0.0 network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1 discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly. # This is a test -- if this is here, then the volume is mounted correctly.
path.logs: /var/log/elasticsearch path.logs: /var/log/elasticsearch
action.destructive_requires_name: true action.destructive_requires_name: true
discovery.type: single-node
script.allowed_types: inline
transport.bind_host: 0.0.0.0 transport.bind_host: 0.0.0.0
transport.publish_host: 0.0.0.0 transport.publish_host: 0.0.0.0
transport.publish_port: 9500 transport.publish_port: 9500
@@ -11,6 +13,5 @@ http.host: 0.0.0.0
http.port: 9400 http.port: 9400
transport.tcp.port: 9500 transport.tcp.port: 9500
transport.host: 0.0.0.0 transport.host: 0.0.0.0
thread_pool.index.queue_size: 100000
thread_pool.search.queue_size: 100000 thread_pool.search.queue_size: 100000
thread_pool.bulk.queue_size: 100000 thread_pool.write.queue_size: 100000
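
This tracks the Elasticsearch 7 rename of the bulk thread pool to write, which TheHive's bundled Elasticsearch now needs. A quick check against the container's HTTP port (9400, per the config above):

```
curl -s 'http://localhost:9400/_cat/thread_pool/search,write?v&h=name,queue_size'
```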

View File

@@ -89,14 +89,6 @@ so-thehive-es:
- /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/thehive:/var/log/elasticsearch:rw - /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment: - environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
- ES_JAVA_OPTS=-Xms512m -Xmx512m - ES_JAVA_OPTS=-Xms512m -Xmx512m
- port_bindings: - port_bindings:
- 0.0.0.0:9400:9400 - 0.0.0.0:9400:9400
@@ -164,4 +156,4 @@ thehive_state_not_allowed:
test.fail_without_changes: test.fail_without_changes:
- name: thehive_state_not_allowed - name: thehive_state_not_allowed
{% endif %} {% endif %}

View File

@@ -44,9 +44,10 @@ base:
- patch.os.schedule - patch.os.schedule
- motd - motd
- salt.minion-check - salt.minion-check
- sensoroni
- salt.lasthighstate - salt.lasthighstate
'*_helix and G@saltversion:{{saltversion}}': '*_helixsensor and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master - salt.master
- ca - ca
@@ -60,9 +61,8 @@ base:
- suricata - suricata
- zeek - zeek
- redis - redis
{%- if LOGSTASH %} - elasticsearch
- logstash - logstash
{%- endif %}
{%- if FILEBEAT %} {%- if FILEBEAT %}
- filebeat - filebeat
{%- endif %} {%- endif %}

View File

@@ -1,8 +1,8 @@
#!/bin/bash #!/bin/bash
{% set ES = salt['pillar.get']('manager:mainip', '') %} {% set ES = salt['pillar.get']('manager:mainip', '') %}
{%- set MANAGER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
# Wait for ElasticSearch to come up, so that we can query for version information # Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..." echo -n "Waiting for ElasticSearch..."
@@ -34,9 +34,10 @@ echo "Applying cross cluster search config..."
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
# Add all the search nodes to cross cluster searching. # Add all the search nodes to cross cluster searching.
{%- if TRUECLUSTER is sameas false %}
{%- if salt['pillar.get']('nodestab', {}) %} {%- if salt['pillar.get']('nodestab', {}) %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{%- endfor %} {%- endfor %}
{%- endif %}
{%- endif %} {%- endif %}
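
To see which remote clusters the loop above actually registered, query the cluster settings back (localhost:9200 per the script; jq is already used elsewhere in this diff):

```
curl -s 'http://localhost:9200/_cluster/settings' | jq '.persistent.search.remote'
```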

View File

@@ -55,33 +55,58 @@ register_agent() {
# Adding agent and getting Id from manager # Adding agent and getting Id from manager
echo "" echo ""
echo "Adding agent:" echo "Adding agent:"
echo "curl -s -u $USER:**** -k -X POST -d 'name=$AGENT_NAME&ip=$AGENT_IP' $PROTOCOL://$API_IP:$API_PORT/agents" echo "Executing: curl -s -u $USER:**** -k -X POST -d 'name=$AGENT_NAME&ip=$AGENT_IP' $PROTOCOL://$API_IP:$API_PORT/agents"
API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP -L $PROTOCOL://$API_IP:$API_PORT/agents) API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP -L $PROTOCOL://$API_IP:$API_PORT/agents)
echo -e $API_RESULT | grep -q "\"error\":0" 2>&1 # Get agent id and key
AGENT_ID=$(echo "$API_RESULT" | jq -er ".data.id")
GOT_ID=$?
AGENT_KEY=$(echo "$API_RESULT" | jq -er ".data.key")
GOT_KEY=$?
if [ "$?" != "0" ]; then if [[ -z "$AGENT_ID" || -z "$AGENT_KEY" || $GOT_ID -ne 0 || $GOT_KEY -ne 0 ]]; then
echo -e $API_RESULT | sed -rn 's/.*"message":"(.+)".*/\1/p' echo "Failed Result: $API_RESULT"
return 1
else else
# Get agent id and agent key
AGENT_ID=$(echo $API_RESULT | cut -d':' -f 4 | cut -d ',' -f 1)
AGENT_KEY=$(echo $API_RESULT | cut -d':' -f 5 | cut -d '}' -f 1)
echo "Agent '$AGENT_NAME' with ID '$AGENT_ID' added." echo "Agent '$AGENT_NAME' with ID '$AGENT_ID' added."
echo "Key for agent '$AGENT_ID' received." echo "Key for agent '$AGENT_ID' received."
# Importing key # Importing key
echo "" echo ""
echo "Importing authentication key:" echo "Importing authentication key:"
echo "y" | /var/ossec/bin/manage_agents -i $AGENT_KEY echo "y" | /var/ossec/bin/manage_agents -i "$AGENT_KEY"
# Restarting agent # Restarting agent
echo "" echo ""
echo "Restarting:" echo "Restarting:"
echo "" echo ""
/var/ossec/bin/ossec-control restart /var/ossec/bin/ossec-control restart
return 0
fi fi
} }
wait_for_manager() {
echo "Waiting for Wazuh manager to become ready..."
maxAttempts=$1
attempts=0
while [[ $attempts -lt $maxAttempts ]]; do
attempts=$((attempts+1))
AGENTS_OUTPUT=$(curl -s -u $USER:"$PASSWORD" -k -X GET -L $PROTOCOL://$API_IP:$API_PORT/agents)
MANAGER_STATUS=$(echo "$AGENTS_OUTPUT" | jq -r ".data.items[0].status")
if [ "$MANAGER_STATUS" == "Active" ]; then
echo "Wazuh manager is active, ready to proceed."
return 0
else
echo "Received non-Active status response: "
echo "$AGENTS_OUTPUT"
echo
echo "Manager is not ready after attempt $attempts of $maxAttempts, sleeping for 30 seconds."
sleep 30
fi
done
return 1
}
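
The refactored registration relies on jq -e, which sets the exit status from the filter result, so a missing id or key is caught without fragile string cutting:

```
echo '{"data":{"id":"003","key":"abc"}}' | jq -er '.data.id'  # prints 003, exits 0
echo '{"error":1,"message":"denied"}'    | jq -er '.data.id'  # prints null, exits 1
```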
remove_agent() { remove_agent() {
echo "Found: $AGENT_ID" echo "Found: $AGENT_ID"
echo "Removing previous registration for '$AGENT_NAME' using ID: $AGENT_ID ..." echo "Removing previous registration for '$AGENT_NAME' using ID: $AGENT_ID ..."
@@ -140,11 +165,18 @@ if [ -f /opt/so/conf/wazuh/initial_agent_registration.log ]; then
echo "Agent $AGENT_ID already registered!" echo "Agent $AGENT_ID already registered!"
exit 0 exit 0
else else
echo "Waiting before registering agent..." retries=30
sleep 30s if wait_for_manager $retries; then
register_agent if register_agent; then
cleanup_creds cleanup_creds
echo "Initial agent $AGENT_ID with IP $AGENT_IP registered on $DATE." > /opt/so/conf/wazuh/initial_agent_registration.log echo "Initial agent $AGENT_ID with IP $AGENT_IP registered on $DATE." > /opt/so/conf/wazuh/initial_agent_registration.log
exit 0 exit 0
else
echo "ERROR: Failed to register agent"
fi
else
echo "ERROR: Wazuh manager did not become ready after $retries attempts; unable to proceed with registration"
fi
fi fi
#remove_agent
exit 1

View File

@@ -115,6 +115,10 @@ append_so-wazuh_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-wazuh - text: so-wazuh
/opt/so/conf/wazuh:
file.symlink:
- target: /nsm/wazuh/etc
# Register the agent # Register the agent
registertheagent: registertheagent:
cmd.run: cmd.run:
@@ -133,10 +137,6 @@ wazuhagentservice:
- name: wazuh-agent - name: wazuh-agent
- enable: True - enable: True
/opt/so/conf/wazuh:
file.symlink:
- target: /nsm/wazuh/etc
hidsruledir: hidsruledir:
file.directory: file.directory:
- name: /opt/so/rules/hids - name: /opt/so/rules/hids

View File

@@ -200,6 +200,14 @@ append_so-zeek_so-status.conf:
file.append: file.append:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-zeek - text: so-zeek
- unless: grep -q so-zeek /opt/so/conf/so-status/so-status.conf
{% if grains.role == 'so-import' %}
disable_so-zeek_so-status.conf:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-zeek$
{% endif %}
{% else %} {% else %}

View File

@@ -1,4 +1,5 @@
{%- import_yaml "zeek/fileextraction_defaults.yaml" as zeek with context %} {% import_yaml "zeek/fileextraction_defaults.yaml" as zeek_default -%}
{% set zeek = salt['grains.filter_by'](zeek_default, default='zeek', merge=salt['pillar.get']('zeek', {})) -%}
# Directory to stage Zeek extracted files before processing # Directory to stage Zeek extracted files before processing
redef FileExtract::prefix = "/nsm/zeek/extracted/"; redef FileExtract::prefix = "/nsm/zeek/extracted/";
# Set a limit to the file size # Set a limit to the file size
@@ -6,7 +7,7 @@ redef FileExtract::default_limit = 9000000;
# These are the mimetypes we want to rip off the networks # These are the mimetypes we want to rip off the networks
export { export {
global _mime_whitelist: table[string] of string = { global _mime_whitelist: table[string] of string = {
{%- for li in zeek.zeek.policy.file_extraction %} {%- for li in zeek.policy.file_extraction %}
{%- if not loop.last %} {%- if not loop.last %}
{%- for k,v in li.items() %} {%- for k,v in li.items() %}
["{{ k }}"] = "{{ v }}", ["{{ k }}"] = "{{ v }}",

Binary file not shown.

Before

Width:  |  Height:  |  Size: 188 KiB

After

Width:  |  Height:  |  Size: 245 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 138 KiB

After

Width:  |  Height:  |  Size: 168 KiB

View File

@@ -26,7 +26,7 @@ ALLOW_ROLE=a
BASICZEEK=7 BASICZEEK=7
BASICSURI=7 BASICSURI=7
# BLOGS= # BLOGS=
BNICS=ens6 BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
@@ -46,7 +46,7 @@ MANAGERUPDATES=1
# MGATEWAY= # MGATEWAY=
# MIP= # MIP=
# MMASK= # MMASK=
MNIC=ens5 MNIC=eth0
# MSEARCH= # MSEARCH=
# MSRV= # MSRV=
# MTU= # MTU=

View File

@@ -1,52 +0,0 @@
#!/bin/bash
source ./so-variables
source ../salt/common/tools/sbin/so-common
source ../salt/common/tools/sbin/so-image-common
# Helper functions
filter_unused_nics() {
if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi
# If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
if [[ $BNICS ]]; then
grep_string="$grep_string"
for BONDNIC in "${BNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
readarray -t filtered_nics <<< "$filtered_nics"
nic_list=()
for nic in "${filtered_nics[@]}"; do
case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
1)
nic_list+=("$nic" "Link UP " "OFF")
;;
0)
nic_list+=("$nic" "Link DOWN " "OFF")
;;
*)
nic_list+=("$nic" "Link UNKNOWN " "OFF")
;;
esac
done
export nic_list
}
calculate_useable_cores() {
# Calculate reasonable core usage
local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
local lb_procs_round
lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}

View File

@@ -15,13 +15,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
source ./so-whiptail # README - DO NOT DEFINE GLOBAL VARIABLES IN THIS FILE. Instead use so-variables.
source ./so-variables
source ./so-common-functions
CONTAINER_REGISTRY=quay.io
SOVERSION=$(cat ../VERSION)
log() { log() {
msg=$1 msg=$1
@@ -48,6 +42,51 @@ logCmd() {
$cmd >> "$setup_log" 2>&1 $cmd >> "$setup_log" 2>&1
} }
filter_unused_nics() {
if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi
# If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
if [[ $BNICS ]]; then
grep_string="$grep_string"
for BONDNIC in "${BNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
readarray -t filtered_nics <<< "$filtered_nics"
nic_list=()
for nic in "${filtered_nics[@]}"; do
case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
1)
nic_list+=("$nic" "Link UP " "OFF")
;;
0)
nic_list+=("$nic" "Link DOWN " "OFF")
;;
*)
nic_list+=("$nic" "Link UNKNOWN " "OFF")
;;
esac
done
export nic_list
}
calculate_useable_cores() {
# Calculate reasonable core usage
local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
local lb_procs_round
lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
airgap_rules() { airgap_rules() {
# Copy the rules for suricata if using Airgap # Copy the rules for suricata if using Airgap
mkdir -p /nsm/repo/rules mkdir -p /nsm/repo/rules
@@ -251,19 +290,19 @@ check_pass_match() {
fi fi
} }
# False if stopped, true if running
check_service_status() { check_service_status() {
local service_name=$1 local service_name=$1
echo "Checking service $service_name status" >> "$setup_log" 2>&1 echo "Checking service $service_name status" >> "$setup_log" 2>&1
systemctl status $service_name > /dev/null 2>&1 systemctl status $service_name > /dev/null 2>&1
local status=$? local status=$?
#true if there is an issue with the service false if it is running properly
if [ $status -gt 0 ]; then if [ $status -gt 0 ]; then
echo "$service_name is not running" >> "$setup_log" 2>&1 echo " $service_name is not running" >> "$setup_log" 2>&1
echo 1; return 1;
else else
echo "$service_name is running" >> "$setup_log" 2>&1 echo " $service_name is running" >> "$setup_log" 2>&1
echo 0; return 0;
fi fi
} }
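
Because these checks now use return codes instead of echoing "0"/"1", callers can use them directly in shell conditionals, as the rewritten retry loops later in this file do:

```
if check_service_status salt-minion; then
  echo "salt-minion is running"
fi
while ! check_salt_master_status; do
  sleep 5
done
```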
@@ -273,28 +312,27 @@ check_salt_master_status() {
salt-call saltutil.kill_all_jobs > /dev/null 2>&1 salt-call saltutil.kill_all_jobs > /dev/null 2>&1
salt-call state.show_top > /dev/null 2>&1 salt-call state.show_top > /dev/null 2>&1
local status=$? local status=$?
#true if there is an issue talking to salt master
if [ $status -gt 0 ]; then if [ $status -gt 0 ]; then
echo 1; echo " Could not talk to salt master" >> "$setup_log" 2>&1
return 1;
else else
echo "Can talk to salt master" >> "$setup_log" 2>&1 echo " Can talk to salt master" >> "$setup_log" 2>&1
echo 0; return 0;
fi fi
} }
check_salt_minion_status() { check_salt_minion_status() {
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
salt "$MINION_ID" test.ping >> "$setup_log" 2>&1 salt "$MINION_ID" test.ping > /dev/null 2>&1
local status=$? local status=$?
#true if there is an issue getting a job response from the minion
if [ $status -gt 0 ]; then if [ $status -gt 0 ]; then
echo 1; echo " Minion did not respond" >> "$setup_log" 2>&1
return 1;
else else
echo "Received job response from salt minion" >> "$setup_log" 2>&1 echo " Received job response from salt minion" >> "$setup_log" 2>&1
echo 0; return 0;
fi fi
} }
check_soremote_pass() { check_soremote_pass() {
@@ -519,6 +557,19 @@ check_requirements() {
fi fi
} }
compare_versions() {
manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
if [[ $manager_ver == "" ]]; then
rm /root/install_opt
echo "Could not determine version of Security Onion running on manager $MSRV. Please check your network settings and run setup again." | tee -a "$setup_log"
kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
[[ "$manager_ver" == "$SOVERSION" ]]
return
}
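
compare_versions deliberately ends with the [[ ]] comparison, so its return status is the comparison result. A caller would presumably branch on it along these lines (the pairing with download_repo_tarball is an assumption based on the re-exec logic later in setup):

```
if ! compare_versions; then
  download_repo_tarball   # pull the manager's matching tarball, then re-run setup
fi
```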
configure_network_sensor() { configure_network_sensor() {
echo "Setting up sensor interface" >> "$setup_log" 2>&1 echo "Setting up sensor interface" >> "$setup_log" 2>&1
local nic_error=0 local nic_error=0
@@ -654,7 +705,7 @@ copy_ssh_key() {
chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
echo "Removing old entry for manager from known_hosts if it exists" echo "Removing old entry for manager from known_hosts if it exists"
sed -i "/${MSRV}/d" /root/.ssh/known_hosts grep -q "$MSRV" /root/.ssh/known_hosts && sed -i "/${MSRV}/d" /root/.ssh/known_hosts
echo "Copying the SSH key to the manager" echo "Copying the SSH key to the manager"
#Copy the key over to the manager #Copy the key over to the manager
@@ -668,7 +719,7 @@ create_local_directories() {
for d in $(find $PILLARSALTDIR/$i -type d); do for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/} suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1 mkdir -pv "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
fi fi
done done
chown -R socore:socore "$local_salt_dir/$i" chown -R socore:socore "$local_salt_dir/$i"
@@ -709,7 +760,8 @@ detect_os() {
exit 1 exit 1
fi fi
echo "Installing required packages to run installer..." >> "$setup_log" 2>&1 # Print message to stdout so the user knows setup is doing something
echo "Installing required packages to run installer..."
# Install bind-utils so the host command exists # Install bind-utils so the host command exists
if [[ ! $is_iso ]]; then if [[ ! $is_iso ]]; then
if ! command -v host > /dev/null 2>&1; then if ! command -v host > /dev/null 2>&1; then
@@ -743,6 +795,7 @@ detect_os() {
exit 1 exit 1
fi fi
# Print message to stdout so the user knows setup is doing something
echo "Installing required packages to run installer..." echo "Installing required packages to run installer..."
# Install network manager so we can do interface stuff # Install network manager so we can do interface stuff
if ! command -v nmcli > /dev/null 2>&1; then if ! command -v nmcli > /dev/null 2>&1; then
@@ -765,12 +818,12 @@ detect_os() {
disable_auto_start() { disable_auto_start() {
if crontab -l -u $INSTALLUSERNAME 2>&1 | grep so-setup > /dev/null 2>&1; then if crontab -l -u $INSTALLUSERNAME 2>&1 | grep -q so-setup; then
# Remove the automated setup script from crontab, if it exists # Remove the automated setup script from crontab, if it exists
logCmd "crontab -u $INSTALLUSERNAME -r" logCmd "crontab -u $INSTALLUSERNAME -r"
fi fi
if grep so-setup /home/$INSTALLUSERNAME/.bash_profile > /dev/null 2>&1; then if grep -s -q so-setup /home/$INSTALLUSERNAME/.bash_profile; then
# Truncate last line of the bash profile # Truncate last line of the bash profile
info "Removing auto-run of setup from bash profile" info "Removing auto-run of setup from bash profile"
sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1
@@ -820,9 +873,9 @@ docker_install() {
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
fi fi
if [[ ! $is_iso ]]; then if [[ ! $is_iso ]]; then
yum -y install docker-ce-19.03.12-3.el7 containerd.io-1.2.13-3.2.el7; yum -y install docker-ce-19.03.14-3.el7 containerd.io-1.2.13-3.2.el7;
fi fi
yum versionlock docker-ce-19.03.12-3.el7; yum versionlock docker-ce-19.03.14-3.el7;
yum versionlock containerd.io-1.2.13-3.2.el7 yum versionlock containerd.io-1.2.13-3.2.el7
} >> "$setup_log" 2>&1 } >> "$setup_log" 2>&1
@@ -858,6 +911,7 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1 echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1 mkdir -p /etc/docker >> "$setup_log" 2>&1
# This will get applied so docker can attempt to start
if [ -z "$DOCKERNET" ]; then if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0 DOCKERNET=172.17.0.0
fi fi
@@ -870,7 +924,7 @@ docker_registry() {
" \"bip\": \"$DNETBIP\","\ " \"bip\": \"$DNETBIP\","\
" \"default-address-pools\": ["\ " \"default-address-pools\": ["\
" {"\ " {"\
" \"base\" : \"$DOCKERNET\","\ " \"base\" : \"$DOCKERNET/24\","\
" \"size\" : 24"\ " \"size\" : 24"\
" }"\ " }"\
" ]"\ " ]"\
@@ -912,6 +966,28 @@ docker_seed_registry() {
} }
download_repo_tarball() {
mkdir -p /root/manager_setup/securityonion
{
local manager_ver
manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
scp -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/repo/"$manager_ver".tar.gz /root/manager_setup
} >> "$setup_log" 2>&1
# Fail if the file doesn't download
if ! [ -f /root/manager_setup/"$manager_ver".tar.gz ]; then
rm /root/install_opt
local message="Could not download $manager_ver.tar.gz from manager, please check your network settings and verify the file /opt/so/repo/$manager_ver.tar.gz exists on the manager."
echo "$message" | tee -a "$setup_log"
kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
{
tar -xzf /root/manager_setup/"$manager_ver".tar.gz -C /root/manager_setup/securityonion
rm -rf /root/manager_setup/"$manager_ver".tar.gz
} >> "$setup_log" 2>&1
}
fireeye_pillar() { fireeye_pillar() {
local fireeye_pillar_path=$local_salt_dir/pillar/fireeye local fireeye_pillar_path=$local_salt_dir/pillar/fireeye
@@ -920,8 +996,8 @@ fireeye_pillar() {
printf '%s\n'\ printf '%s\n'\
"fireeye:"\ "fireeye:"\
" helix:"\ " helix:"\
" api_key: '$HELIXAPIKEY'" " api_key: '$HELIXAPIKEY'" \
"" > "$fireeye_pillar_path"/init.sls "" > "$fireeye_pillar_path/init.sls"
} }
@@ -953,22 +1029,27 @@ fleet_pillar() {
generate_passwords(){ generate_passwords(){
# Generate Random Passwords for Things # Generate Random Passwords for Things
MYSQLPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) MYSQLPASS=$(get_random_value)
PLAYBOOKDBPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) PLAYBOOKDBPASS=$(get_random_value)
PLAYBOOKADMINPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) PLAYBOOKADMINPASS=$(get_random_value)
PLAYBOOKAUTOMATIONPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) PLAYBOOKAUTOMATIONPASS=$(get_random_value)
FLEETPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETPASS=$(get_random_value)
FLEETJWT=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETJWT=$(get_random_value)
GRAFANAPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) GRAFANAPASS=$(get_random_value)
if [[ "$THEHIVE" == "1" ]]; then if [[ "$THEHIVE" == "1" ]]; then
HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) HIVEKEY=$(get_random_value)
HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) HIVEPLAYSECRET=$(get_random_value)
CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) CORTEXKEY=$(get_random_value)
CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) CORTEXORGUSERKEY=$(get_random_value)
CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) CORTEXPLAYSECRET=$(get_random_value)
fi fi
SENSORONIKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) SENSORONIKEY=$(get_random_value)
KRATOSKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) KRATOSKEY=$(get_random_value)
}
generate_repo_tarball() {
mkdir /opt/so/repo
tar -czf /opt/so/repo/"$SOVERSION".tar.gz ../.
} }
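
get_random_value is defined in so-common rather than in this diff. A plausible definition, assuming it generalizes the tr/fold/head pipeline it replaces and accepts an optional length (the get_random_value 40 call in minio_generate_keys below suggests one):

```
get_random_value() {
  local length=${1:-20}   # default matches the old fold -w 20
  tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "$length" | head -n 1
}
```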
get_redirect() { get_redirect() {
@@ -1002,22 +1083,29 @@ host_pillar() {
printf '%s\n'\ printf '%s\n'\
"host:"\ "host:"\
" mainint: '$MNIC'"\ " mainint: '$MNIC'"\
"sensoroni:"\
" node_address: '$MAINIP'"\
" node_description: '$NODE_DESCRIPTION'"\
"" > "$pillar_file" "" > "$pillar_file"
} }
install_cleanup() { install_cleanup() {
echo "Installer removing the following files:" if [ -f "$temp_install_dir" ]; then
ls -lR "$temp_install_dir" echo "Installer removing the following files:"
ls -lR "$temp_install_dir"
# Clean up after ourselves # Clean up after ourselves
rm -rf "$temp_install_dir" rm -rf "$temp_install_dir"
fi
# All cleanup prior to this statement must be compatible with automated testing. Cleanup # All cleanup prior to this statement must be compatible with automated testing. Cleanup
# that will disrupt automated tests should be placed beneath this statement. # that will disrupt automated tests should be placed beneath this statement.
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
# If Mysql is running stop it # If Mysql is running stop it
/usr/sbin/so-mysql-stop if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then
/usr/sbin/so-mysql-stop
fi
if [[ $setup_type == 'iso' ]]; then if [[ $setup_type == 'iso' ]]; then
info "Removing so-setup permission entry from sudoers file" info "Removing so-setup permission entry from sudoers file"
@@ -1102,15 +1190,18 @@ manager_pillar() {
manager_global() { manager_global() {
local global_pillar="$local_salt_dir/pillar/global.sls" local global_pillar="$local_salt_dir/pillar/global.sls"
if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then if [ -z "$NODE_CHECKIN_INTERVAL_MS" ]; then
SENSOR_CHECKIN_INTERVAL_MS=10000 NODE_CHECKIN_INTERVAL_MS=10000
if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then
SENSOR_CHECKIN_INTERVAL_MS=1000 NODE_CHECKIN_INTERVAL_MS=1000
fi fi
fi fi
if [ -z "$DOCKERNET" ]; then if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0 DOCKERNET=172.17.0.0
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
else
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
fi fi
# Create a global file for global values # Create a global file for global values
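
The awk expression derives the Docker bridge IP from the pool's base address by replacing the last octet with 1:

```
echo 172.17.0.0 | awk -F'.' '{print $1,$2,$3,1}' OFS='.'   # -> 172.17.0.1
# so DOCKERNET=172.17.0.0 yields DOCKERBIP=172.17.0.1/24
```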
@@ -1164,8 +1255,8 @@ manager_global() {
" managerupdate: $MANAGERUPDATES"\ " managerupdate: $MANAGERUPDATES"\
" imagerepo: '$IMAGEREPO'"\ " imagerepo: '$IMAGEREPO'"\
" pipeline: 'redis'"\ " pipeline: 'redis'"\
"pcap:"\ "sensoroni:"\
" sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\ " node_checkin_interval_ms: $NODE_CHECKIN_INTERVAL_MS"\
"strelka:"\ "strelka:"\
" enabled: $STRELKA"\ " enabled: $STRELKA"\
" rules: 1"\ " rules: 1"\
@@ -1174,9 +1265,17 @@ manager_global() {
"elastic:"\ "elastic:"\
" features: False"\ " features: False"\
"elasticsearch:"\ "elasticsearch:"\
" replicas: 0"\ " replicas: 0" >> "$global_pillar"
" true_cluster: False"\ if [ -n "$ESCLUSTERNAME" ]; then
" true_cluster_name: 'so'"\ printf '%s\n'\
" true_cluster: True"\
" true_cluster_name: '$ESCLUSTERNAME'" >> "$global_pillar"
else
printf '%s\n'\
" true_cluster: False"\
" true_cluster_name: 'so'" >> "$global_pillar"
fi
printf '%s\n'\
" discovery_nodes: 1"\ " discovery_nodes: 1"\
" hot_warm_enabled: False"\ " hot_warm_enabled: False"\
" cluster_routing_allocation_disk.threshold_enabled: true"\ " cluster_routing_allocation_disk.threshold_enabled: true"\
@@ -1250,6 +1349,9 @@ manager_global() {
" playbook:"\ " playbook:"\
" rulesets:"\ " rulesets:"\
" - windows"\ " - windows"\
"docker:"\
" range: '$DOCKERNET/24'"\
" bip: '$DOCKERBIP'"\
"redis_settings:"\ "redis_settings:"\
" redis_maxmemory: 812" >> "$global_pillar" " redis_maxmemory: 812" >> "$global_pillar"
@@ -1261,8 +1363,8 @@ minio_generate_keys() {
local charSet="[:graph:]" local charSet="[:graph:]"
ACCESS_KEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) ACCESS_KEY=$(get_random_value)
ACCESS_SECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 40 | head -n 1) ACCESS_SECRET=$(get_random_value 40)
} }
@@ -1277,12 +1379,10 @@ network_setup() {
disable_misc_network_features; disable_misc_network_features;
echo "... Setting ONBOOT for management interface"; echo "... Setting ONBOOT for management interface";
if ! netplan > /dev/null 2>&1; then command -v netplan &> /dev/null || nmcli con mod "$MNIC" connection.autoconnect "yes"
nmcli con mod "$MNIC" connection.autoconnect "yes";
fi
echo "... Copying 99-so-checksum-offload-disable"; echo "... Copying 99-so-checksum-offload-disable";
cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable ; cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable ;
echo "... Modifying 99-so-checksum-offload-disable"; echo "... Modifying 99-so-checksum-offload-disable";
sed -i "s/\$MNIC/${MNIC}/g" /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable; sed -i "s/\$MNIC/${MNIC}/g" /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable;
@@ -1298,8 +1398,15 @@ elasticsearch_pillar() {
"elasticsearch:"\ "elasticsearch:"\
" mainip: '$MAINIP'"\ " mainip: '$MAINIP'"\
" mainint: '$MNIC'"\ " mainint: '$MNIC'"\
" esheap: '$NODE_ES_HEAP_SIZE'"\ " esheap: '$NODE_ES_HEAP_SIZE'" >> "$pillar_file"
" esclustername: {{ grains.host }}"\ if [ -n "$ESCLUSTERNAME" ]; then
printf '%s\n'\
" esclustername: $ESCLUSTERNAME" >> "$pillar_file"
else
printf '%s\n'\
" esclustername: {{ grains.host }}" >> "$pillar_file"
fi
printf '%s\n'\
" node_type: '$NODETYPE'"\ " node_type: '$NODETYPE'"\
" es_port: $node_es_port"\ " es_port: $node_es_port"\
" log_size_limit: $log_size_limit"\ " log_size_limit: $log_size_limit"\
@@ -1318,7 +1425,7 @@ elasticsearch_pillar() {
parse_install_username() { parse_install_username() {
# parse out the install username so things copy correctly # parse out the install username so things copy correctly
INSTALLUSERNAME=$(pwd | sed -E 's/\// /g' | awk '{ print $2 }') INSTALLUSERNAME=${SUDO_USER:-${USER}}
} }
patch_pillar() { patch_pillar() {
@@ -1380,18 +1487,54 @@ reserve_group_ids() {
reinstall_init() { reinstall_init() {
info "Putting system in state to run setup again" info "Putting system in state to run setup again"
if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
local salt_services=( "salt-master" "salt-minion" )
else
local salt_services=( "salt-minion" )
fi
local service_retry_count=20
{ {
# Kill any salt processes if command -v salt-call &> /dev/null; then
pkill -9 -ef /usr/bin/salt # Disable schedule so highstate doesn't start running during the install
salt-call -l info schedule.disable
# Kill any currently running salt jobs, also to prevent issues with highstate.
salt-call -l info saltutil.kill_all_jobs
fi
# Kill any salt processes (safely)
for service in "${salt_services[@]}"; do
# Stop the service in the background so we can exit after a certain amount of time
systemctl stop "$service" &
local pid=$!
local count=0
while check_service_status "$service"; do
if [[ $count -gt $service_retry_count ]]; then
echo "Could not stop $service after 1 minute, exiting setup."
# Stop the systemctl process trying to kill the service, show user a message, then exit setup
kill -9 $pid
kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
sleep 5
((count++))
done
done
# Remove all salt configs # Remove all salt configs
rm -rf /etc/salt/global /etc/salt/minion /etc/salt/master /etc/salt/pki/* rm -rf /etc/salt/grains /etc/salt/minion /etc/salt/pki/*
if command -v docker &> /dev/null; then if command -v docker &> /dev/null; then
# Stop and remove all so-* containers so files can be changed with more safety # Stop and remove all so-* containers so files can be changed with more safety
docker stop $(docker ps -a -q --filter "name=so-") if [ $(docker ps -a -q --filter "name=so-" | wc -l) -gt 0 ]; then
docker rm -f $(docker ps -a -q --filter "name=so-") docker stop $(docker ps -a -q --filter "name=so-")
docker rm -f $(docker ps -a -q --filter "name=so-")
fi
fi fi
local date_string local date_string
@@ -1407,7 +1550,7 @@ reinstall_init() {
# Remove the old launcher package in case the config changes # Remove the old launcher package in case the config changes
remove_package launcher-final remove_package launcher-final
} >> $setup_log 2>&1 } >> "$setup_log" 2>&1
} }
backup_dir() { backup_dir() {
@@ -1538,7 +1681,7 @@ saltify() {
'FLEET') 'FLEET')
if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
;; ;;
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR? 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
# Add saltstack repo(s) # Add saltstack repo(s)
wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
@@ -1576,7 +1719,7 @@ saltify() {
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;; ;;
esac esac
apt-get update >> "$setup_log" 2>&1 apt-get update >> "$setup_log" 2>&1
set_progress_str 8 'Installing salt-minion & python modules' set_progress_str 8 'Installing salt-minion & python modules'
@@ -1604,61 +1747,59 @@ salt_checkin() {
"salt-master" \ "salt-master" \
"salt-minion" "salt-minion"
) )
local LOOP_COUNT=0 local count=0
for service in "${SALT_SERVICES[@]}"; do for service in "${SALT_SERVICES[@]}"; do
echo "Stopping service $service" >> "$setup_log" 2>&1 {
systemctl stop "$service" >> "$setup_log" 2>&1 echo "Restarting service $service"
LOOP_COUNT=0 systemctl restart "$service" &
while ! (( $(check_service_status $service) )); do local pid=$!
echo "$service still running" >> "$setup_log" 2>&1 } >> "$setup_log" 2>&1
if [ $LOOP_COUNT -gt 60 ]; then
echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 count=0
exit 1 while ! (check_service_status "$service"); do
# On final loop, kill the pid trying to restart service and try to manually kill then start it
if [ $count -eq 12 ]; then
{
kill -9 "$pid"
systemctl kill "$service"
systemctl start "$service" &
local pid=$!
} >> "$setup_log" 2>&1
fi fi
sleep 1;
((LOOP_COUNT+=1)) if [ $count -gt 12 ]; then
echo "$service could not be restarted in 120 seconds, exiting" >> "$setup_log" 2>&1
kill -9 "$pid"
kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
sleep 10;
((count++))
done done
done done
sleep 5; count=0
while ! (check_salt_master_status); do
for service in "${SALT_SERVICES[@]}"; do
echo "Starting service $service" >> "$setup_log" 2>&1
systemctl start "$service" >> "$setup_log" 2>&1
LOOP_COUNT=0
while (( $(check_service_status $service) )); do
echo "$service still not running" >> "$setup_log" 2>&1
if [ $LOOP_COUNT -gt 60 ]; then
echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1
exit 1
fi
sleep 1;
((LOOP_COUNT+=1))
done
done
sleep 5;
LOOP_COUNT=0
while (( $(check_salt_master_status) )); do
echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1
if [ $LOOP_COUNT -gt 30 ]; then if [ $count -gt 30 ]; then
echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1
exit 1 kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi fi
sleep 1; sleep 1;
((LOOP_COUNT+=1)) ((count++))
done done
LOOP_COUNT=0 count=0
while (( $(check_salt_minion_status) )); do while ! (check_salt_minion_status); do
echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1
if [ $LOOP_COUNT -gt 30 ]; then if [ $count -gt 30 ]; then
echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1
exit 1 kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi fi
systemctl kill salt-minion
systemctl start salt-minion
sleep 1; sleep 1;
((LOOP_COUNT+=1)) ((count++))
done done
echo " Confirming existence of the CA certificate" echo " Confirming existence of the CA certificate"
@@ -1708,6 +1849,19 @@ set_network_dev_status_list() {
set_main_ip() { set_main_ip() {
MAINIP=$(ip route get 1 | awk '{print $7;exit}') MAINIP=$(ip route get 1 | awk '{print $7;exit}')
MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2)
}
compare_main_nic_ip() {
if [[ "$MAINIP" != "$MNIC_IP" ]]; then
read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
This is not a supported configuration, please remediate and rerun setup.
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
} }
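
set_main_ip asks the kernel which source address it would use to reach an arbitrary destination, and compare_main_nic_ip then insists this matches the management NIC's address. The two values side by side (eth0 and the sample output are illustrative):

```
ip route get 1 | awk '{print $7;exit}'                   # routed source IP, e.g. 192.168.1.50
ip a s eth0 | grep -oE 'inet [0-9.]+' | cut -d' ' -f2    # IP assigned to the NIC
```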
# Add /usr/sbin to everyone's path # Add /usr/sbin to everyone's path
@@ -1793,7 +1947,6 @@ sensor_pillar() {
if [ "$HNSENSOR" != 'inherit' ]; then if [ "$HNSENSOR" != 'inherit' ]; then
echo " hnsensor: $HNSENSOR" >> "$pillar_file" echo " hnsensor: $HNSENSOR" >> "$pillar_file"
fi fi
} }
set_default_log_size() { set_default_log_size() {
@@ -1881,7 +2034,7 @@ set_initial_firewall_policy() {
;;
'HEAVYNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost heavy_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
@@ -1966,6 +2119,17 @@ set_updates() {
fi
}
steno_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
# Create the stenographer pillar
printf '%s\n'\
"steno:"\
" enabled: True" >> "$pillar_file"
}
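For reference, once steno_pillar runs, the minion pillar file ends with a block equivalent to:

```
steno:
  enabled: True
```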
mark_version() {
# Drop a file with the current version
echo "$SOVERSION" > /etc/soversion

View File

@@ -22,11 +22,24 @@ if [ "$uid" -ne 0 ]; then
exit 1
fi

# Save the original argument array since we modify it
readarray -t original_args <<< "$@"

cd "$(dirname "$0")" || exit 255

# Source the generic function libraries that are also used by the product after
# setup. These functions are intended to be reusable outside of the setup process.
source ../salt/common/tools/sbin/so-common
source ../salt/common/tools/sbin/so-image-common

# Setup bash functionality is divided into functions and user-facing prompts.
# Do not attempt to re-use any of this functionality outside of setup. Instead,
# if needed, migrate generic functions into so-common.
source ./so-functions
source ./so-whiptail

# Finally, source the default variable definitions, which require availability of
# functions sourced above.
source ./so-variables

# Parse command line arguments
@@ -54,32 +67,31 @@ while [[ $# -gt 0 ]]; do
esac
done

if ! [ -f /root/install_opt ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi

if [[ -f /root/accept_changes ]]; then
is_reinstall=true
# Move last setup log to backup
mv "$setup_log" "$setup_log.bak"
[ -f "$error_log" ] && mv "$error_log" "$error_log.bak"
fi

parse_install_username

if ! [ -f /root/install_opt ]; then
# Begin Installation pre-processing
title "Initializing Setup"
info "Installing as the $INSTALLUSERNAME user"
analyze_system
fi

automated=no

function progress() {
local title='Security Onion Install'
if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root ]]; then
if [[ -s /var/spool/mail/root ]]; then
echo '[ ERROR ] /var/spool/mail/root grew unexpectedly' >> $setup_log 2>&1
fi
export SO_ERROR=1
title="Error found, please check $setup_log"
fi
if [ $automated == no ]; then
whiptail --title "$title" --gauge 'Please wait while installing...' 6 60 0 # append to text
@@ -115,7 +127,7 @@ case "$setup_type" in
echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1 echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
;; ;;
*) *)
echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log echo "Invalid install type, must be 'iso' or 'network'" | tee -a $setup_log
exit 1 exit 1
;; ;;
esac esac
@@ -152,14 +164,18 @@ if [ "$automated" == no ]; then
fi
fi

if ! [ -f /root/install_opt ]; then
if (whiptail_you_sure); then
true
else
echo "User cancelled setup." | tee -a "$setup_log"
whiptail_cancel
fi

whiptail_install_type
else
source /root/install_opt
fi
if [ "$install_type" = 'EVAL' ]; then if [ "$install_type" = 'EVAL' ]; then
is_node=true is_node=true
@@ -172,7 +188,6 @@ elif [ "$install_type" = 'STANDALONE' ]; then
is_distmanager=true
is_node=true
is_sensor=true
elif [ "$install_type" = 'MANAGERSEARCH' ]; then
is_manager=true
is_distmanager=true
@@ -190,7 +205,6 @@ elif [ "$install_type" = 'HEAVYNODE' ]; then
is_node=true
is_minion=true
is_sensor=true
elif [ "$install_type" = 'FLEET' ]; then
is_minion=true
is_fleet_standalone=true
@@ -200,9 +214,7 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then
elif [ "$install_type" = 'IMPORT' ]; then elif [ "$install_type" = 'IMPORT' ]; then
is_import=true is_import=true
elif [ "$install_type" = 'ANALYST' ]; then elif [ "$install_type" = 'ANALYST' ]; then
cd .. || exit 255 is_analyst=true
./so-analyst-install
exit 0
fi fi
# Say yes to the dress if it's an ISO install
@@ -211,56 +223,96 @@ if [[ "$setup_type" == 'iso' ]]; then
fi

# Check if this is an airgap install
if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
whiptail_airgap
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true
fi
fi
if ! [ -f /root/install_opt ]; then
if [[ $is_manager && $is_sensor ]]; then
check_requirements "standalone"
elif [[ $is_fleet_standalone ]]; then
check_requirements "dist" "fleet"
elif [[ $is_sensor && ! $is_eval ]]; then
check_requirements "dist" "sensor"
elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then
check_requirements "dist"
elif [[ $is_import ]]; then
check_requirements "import"
fi
case "$setup_type" in
'iso')
whiptail_set_hostname
whiptail_management_nic
whiptail_dhcp_or_static
if [ "$address_type" != 'DHCP' ]; then
whiptail_management_interface_ip
whiptail_management_interface_mask
whiptail_management_interface_gateway
whiptail_management_interface_dns
whiptail_management_interface_dns_search
fi
;;
'network')
whiptail_network_notice
whiptail_dhcp_warn
whiptail_set_hostname
whiptail_management_nic
;;
esac
if [[ $is_minion ]]; then
whiptail_management_server
fi
if [[ $is_minion || $is_iso ]]; then
whiptail_management_interface_setup
fi
# Init networking so rest of install works
disable_ipv6
set_hostname
if [[ "$setup_type" == 'iso' ]]; then
set_management_interface
fi
if [[ -n "$TURBO" ]]; then
use_turbo_proxy
fi
if [[ $is_minion ]]; then
add_mngr_ip_to_hosts
fi
if [[ $is_minion ]]; then
[ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
fi
if [[ $is_minion ]] && ! (compare_versions); then
info "Installer version mismatch, downloading correct version from manager"
printf '%s\n' \
"install_type=$install_type" \
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" \
"MSRV=$MSRV"\
"MSRVIP=$MSRVIP" > /root/install_opt
download_repo_tarball
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
if [[ $is_analyst ]]; then
cd .. || exit 255
exec bash so-analyst-install
fi
else
rm -rf /root/install_opt >> "$setup_log" 2>&1
fi
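`compare_versions` is defined in the sourced function libraries rather than shown here. A plausible minimal sketch, assuming it compares the local VERSION against the version recorded on the manager (the manager-side path and the exact mechanism are assumptions for illustration):

```
# Hypothetical sketch; the real compare_versions lives in the sourced libraries.
compare_versions() {
    local manager_version
    # Assumption: the manager exposes its version in /etc/soversion (written by mark_version).
    manager_version=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
    [ "$SOVERSION" = "$manager_version" ]
}
```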
if [[ ! $is_import ]]; then
whiptail_patch_schedule
fi
case "$setup_type" in
'iso')
whiptail_set_hostname
whiptail_management_nic
whiptail_dhcp_or_static
if [ "$address_type" != 'DHCP' ]; then
whiptail_management_interface_ip
whiptail_management_interface_mask
whiptail_management_interface_gateway
whiptail_management_interface_dns
whiptail_management_interface_dns_search
fi
#collect_adminuser_inputs
;;
'network')
whiptail_network_notice
whiptail_dhcp_warn
whiptail_set_hostname
whiptail_management_nic
;;
esac
short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}')
MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
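For example, a host named sensor1.example.com installed as a HEAVYNODE ends up with the minion ID sensor1_heavynode:

```
short_name=$(echo "sensor1.example.com" | awk -F. '{print $1}')   # -> sensor1
echo "${short_name}_HEAVYNODE" | tr '[:upper:]' '[:lower:]'       # -> sensor1_heavynode
```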
@@ -327,8 +379,11 @@ if [[ $is_helix || $is_sensor || $is_import ]]; then
calculate_useable_cores
fi
if [[ ! $is_import ]]; then
whiptail_patch_schedule
fi
whiptail_homenet_manager
whiptail_dockernet_check
if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then
set_base_heapsizes
@@ -336,6 +391,11 @@ fi
if [[ $is_manager && ! $is_eval ]]; then
whiptail_manager_adv
if [ "$MANAGERADV" = 'ADVANCED' ]; then
if [ "$install_type" = 'MANAGER' ] || [ "$install_type" = 'MANAGERSEARCH' ]; then
whiptail_manager_adv_escluster
fi
fi
whiptail_zeek_version
# Don't run this function for now since Snort is not yet supported
# whiptail_nids
@@ -346,10 +406,6 @@ if [[ $is_manager && ! $is_eval ]]; then
whiptail_oinkcode
fi
if [[ "$STRELKA" == 1 ]]; then
STRELKARULES=1
fi
if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then
whiptail_manager_adv_service_zeeklogs whiptail_manager_adv_service_zeeklogs
fi fi
@@ -358,6 +414,15 @@ fi
if [[ $is_manager ]]; then
whiptail_components_adv_warning
whiptail_enable_components
if [[ "$STRELKA" = 1 ]]; then
info "Enabling Strelka rules"
STRELKARULES=1
else
info "Disabling Strelka rules: STRELKA='$STRELKA'"
fi
whiptail_dockernet_check
fi

if [[ $is_manager || $is_import ]]; then
@@ -372,10 +437,6 @@ if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && !
fi
fi
if [[ $is_minion ]]; then
whiptail_management_server
fi
if [[ $is_distmanager ]]; then
collect_soremote_inputs
fi
@@ -428,72 +489,69 @@ whiptail_make_changes
# From here on changes will be made.
echo "1" > /root/accept_changes

# Set up handler for setup to exit early (use `kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1` in child scripts)
trap 'catch $LINENO' SIGUSR1

catch() {
info "Fatal error occurred at $1 in so-setup, failing setup."
grep --color=never "ERROR" "$setup_log" > "$error_log"
whiptail_setup_failed
exit
}

# This block sets REDIRECTIT which is used by a function outside the below subshell
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
set_redirect >> $setup_log 2>&1
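The trap comment above documents the convention: any script spawned by so-setup can abort the entire run by signalling its parent. An illustrative fragment for a child script (`some_critical_step` is a placeholder, not a real function):

```
# In a child script: on fatal failure, signal so-setup (our parent) and bail out.
if ! some_critical_step >> "$setup_log" 2>&1; then
    echo "ERROR: critical step failed" >> "$setup_log"
    kill -SIGUSR1 "$(ps --pid $$ -oppid=)"
    exit 1
fi
```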
disable_ipv6
disable_auto_start
if [[ "$setup_type" != 'iso' ]]; then
set_hostname
fi
if [[ $is_minion ]]; then
add_mngr_ip_to_hosts
fi
{
mark_version;
clear_manager;
} >> $setup_log 2>&1
if [[ $is_manager || $is_import ]]; then
{
generate_passwords;
secrets_pillar;
add_socore_user_manager;
} >> $setup_log 2>&1
fi
if [[ $is_manager && ! $is_eval ]]; then
add_soremote_user_manager >> $setup_log 2>&1
fi
{
set_main_ip;
set_redirect;
} >> $setup_log 2>&1
host_pillar >> $setup_log 2>&1
if [[ $is_minion || $is_import ]]; then
set_updates >> $setup_log 2>&1
[ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
fi
# Begin install
{
# Set initial percentage to 0
export percentage=0

# Show initial progress message
set_progress_str 0 'Running initial configuration steps'

set_path

if [[ $is_reinstall ]]; then
reinstall_init
fi
disable_auto_start
{
mark_version;
clear_manager;
} >> $setup_log 2>&1
if [[ $is_manager || $is_import ]]; then
{
generate_passwords;
secrets_pillar;
} >> $setup_log 2>&1
fi
if [[ $is_manager || $is_import || $is_helix ]]; then
add_socore_user_manager >> $setup_log 2>&1
fi
if [[ $is_manager && ! $is_eval ]]; then
add_soremote_user_manager >> $setup_log 2>&1
fi
host_pillar >> $setup_log 2>&1
if [[ $is_minion || $is_import ]]; then
set_updates >> $setup_log 2>&1
fi
if [[ $is_manager && $is_airgap ]]; then
info "Creating airgap repo"
create_repo >> $setup_log 2>&1
airgap_rules >> $setup_log 2>&1
fi

if [[ $is_minion ]]; then
set_progress_str 1 'Configuring firewall'
@@ -511,6 +569,9 @@ fi
if [[ $is_sensor || $is_helix || $is_import ]]; then
set_progress_str 4 'Generating sensor pillar'
sensor_pillar >> $setup_log 2>&1
if [[ $is_sensor || $is_helix ]]; then
steno_pillar >> $setup_log
fi
fi

set_progress_str 5 'Installing Salt and dependencies'
@@ -570,7 +631,7 @@ fi
accept_salt_key_remote >> $setup_log 2>&1
fi

if [[ $is_manager || $is_import || $is_helix ]]; then
set_progress_str 20 'Accepting Salt key'
salt-key -ya "$MINION_ID" >> $setup_log 2>&1
fi
@@ -580,7 +641,7 @@ fi
if [[ $is_minion ]]; then
set_progress_str 22 'Checking if the Salt Minion needs to be updated'
salt-call state.apply -l info salt.minion >> $setup_log 2>&1
fi

set_progress_str 23 'Generating CA and checking in'
@@ -626,10 +687,12 @@ fi
set_progress_str 63 "$(print_salt_state_apply 'common')"
salt-call state.apply -l info common >> $setup_log 2>&1

if [[ ! $is_helix ]]; then
set_progress_str 64 "$(print_salt_state_apply 'nginx')"
salt-call state.apply -l info nginx >> $setup_log 2>&1
fi

if [[ $is_manager || $is_node || $is_import || $is_helix ]]; then
set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')"
salt-call state.apply -l info elasticsearch >> $setup_log 2>&1
fi
@@ -639,12 +702,14 @@ fi
salt-call state.apply -l info pcap >> $setup_log 2>&1
fi

if [[ $is_sensor || $is_import || $is_helix ]]; then
set_progress_str 66 "$(print_salt_state_apply 'suricata')"
salt-call state.apply -l info suricata >> $setup_log 2>&1
if [[ $ZEEKVERSION == 'ZEEK' ]]; then
set_progress_str 67 "$(print_salt_state_apply 'zeek')"
salt-call state.apply -l info zeek >> $setup_log 2>&1
fi
fi

if [[ $is_node ]]; then
@@ -686,8 +751,10 @@ fi
if [[ "$OSQUERY" = 1 ]]; then if [[ "$OSQUERY" = 1 ]]; then
set_progress_str 75 "$(print_salt_state_apply 'fleet.event_enable-fleet')"
salt-call state.apply -l info fleet.event_enable-fleet >> $setup_log 2>&1
set_progress_str 75 "$(print_salt_state_apply 'fleet')" set_progress_str 75 "$(print_salt_state_apply 'fleet')"
salt-call state.apply fleet.event_enable-fleet # enable fleet in the global pillar
salt-call state.apply -l info fleet >> $setup_log 2>&1 salt-call state.apply -l info fleet >> $setup_log 2>&1
set_progress_str 76 "$(print_salt_state_apply 'redis')" set_progress_str 76 "$(print_salt_state_apply 'redis')"
@@ -719,12 +786,14 @@ fi
set_progress_str 81 "$(print_salt_state_apply 'strelka')"
salt-call state.apply -l info strelka >> $setup_log 2>&1
fi
if [[ "$STRELKARULES" = 1 ]]; then
logCmd /usr/sbin/so-yara-update
else
info "Skipping running yara update: STRELKARULES='$STRELKARULES'"
fi
fi

if [[ $is_manager || $is_import ]]; then
set_progress_str 82 "$(print_salt_state_apply 'utility')"
salt-call state.apply -l info utility >> $setup_log 2>&1
fi
@@ -758,21 +827,32 @@ success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ $success != 0 ]]; then SO_ERROR=1; fi

# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox
if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
SO_ERROR=1
grep --color=never "ERROR" "$setup_log" > "$error_log"
fi

if [[ -n $SO_ERROR ]]; then
echo "Errors detected during setup; skipping post-setup steps to allow for analysis of failures." >> $setup_log 2>&1
SKIP_REBOOT=1
whiptail_setup_failed
else
echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1
{
export percentage=95 # set to last percentage used in previous subshell
if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
set_progress_str 97 "Running so-allow -${ALLOW_ROLE} for ${ALLOW_CIDR}"
IP=$ALLOW_CIDR so-allow -$ALLOW_ROLE >> $setup_log 2>&1
fi
if [[ $is_manager ]]; then
set_progress_str 98 "Generating archive for setup directory"
generate_repo_tarball >> "$setup_log" 2>&1
fi
if [[ $THEHIVE == 1 ]]; then
set_progress_str 99 'Waiting for TheHive to start up'
check_hive_init >> $setup_log 2>&1
@@ -783,6 +863,6 @@ else
echo "Post-installation steps have completed." >> $setup_log 2>&1 echo "Post-installation steps have completed." >> $setup_log 2>&1
fi fi
install_cleanup >> $setup_log 2>&1 install_cleanup >> "$setup_log" 2>&1
if [[ -z $SKIP_REBOOT ]]; then shutdown -r now; else exit; fi if [[ -z $SKIP_REBOOT ]]; then shutdown -r now; else exit; fi

View File

@@ -1,5 +1,7 @@
#!/bin/bash

SOVERSION=$(cat ../VERSION)

total_mem=$(grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//')
export total_mem
@@ -12,7 +14,7 @@ export num_cpu_cores
readarray -t cpu_core_list <<< "$(grep "processor" /proc/cpuinfo | grep -v "KVM" | awk '{print $3}')"
export cpu_core_list

random_uid=$(get_random_value 16)
export random_uid

node_es_port=9200
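`get_random_value` now comes from so-common; the inline one-liner it replaces suggests its behavior. A minimal sketch consistent with that behavior (the body here is an assumption, not the library's actual code):

```
# Sketch: emit $1 random alphanumeric characters, mirroring the old
# </dev/urandom tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1 pipeline.
get_random_value() {
    local length=${1:-16}
    </dev/urandom tr -dc 'a-zA-Z0-9' | head -c "$length"
}
```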
@@ -21,6 +23,9 @@ export node_es_port
setup_log="/root/sosetup.log"
export setup_log
error_log="/root/errors.log"
export error_log
filesystem_root=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
export filesystem_root
@@ -59,5 +64,5 @@ mkdir -p "$default_salt_dir"
export local_salt_dir=/opt/so/saltstack/local
mkdir -p "$local_salt_dir"

SCRIPTDIR=$(pwd)
export SCRIPTDIR

View File

@@ -15,9 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
source ./so-variables
source ./so-common-functions
whiptail_airgap() {
[ -n "$TESTING" ] && return
@@ -31,29 +28,11 @@ whiptail_airgap() {
whiptail_check_exitstatus $exitstatus
}
whiptail_basic_zeek() {
[ -n "$TESTING" ] && return
if [[ $is_smooshed ]]; then
local PROCS=$(expr $lb_procs / 2)
if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
else
local PROCS=$lb_procs
fi
BASICZEEK=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the number of zeek processes:" 10 75 "$PROCS" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_basic_suri() {
[ -n "$TESTING" ] && return
if [[ $is_node && $is_sensor && ! $is_eval ]]; then
local PROCS=$(expr $lb_procs / 2)
if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
else
@@ -68,81 +47,34 @@ whiptail_basic_suri() {
}

whiptail_basic_zeek() {
[ -n "$TESTING" ] && return
if [[ $is_node && $is_sensor && ! $is_eval ]]; then
local PROCS=$(expr $lb_procs / 2)
if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
else
local PROCS=$lb_procs
fi
BASICZEEK=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the number of zeek processes:" 10 75 "$PROCS" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_zeek_version() {
[ -n "$TESTING" ] && return
ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 \
"ZEEK" "Zeek (formerly known as Bro)" ON \
"SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_sensor_nics() {
[ -n "$TESTING" ] && return
filter_unused_nics
if [[ $is_ec2 ]]; then
local menu_text="Please select NIC for the Monitor Interface:"
local list_type="radiolist"
else
local menu_text="Please add NICs to the Monitor Interface:"
local list_type="checklist"
fi
BNICS=$(whiptail --title "NIC Setup" --$list_type "$menu_text" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$BNICS" ]
do
BNICS=$(whiptail --title "NIC Setup" --$list_type "$menu_text" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
BNICS=$(echo "$BNICS" | tr -d '"')
IFS=' ' read -ra BNICS <<< "$BNICS"
for bond_nic in "${BNICS[@]}"; do
if [[ "${nmcli_dev_status_list}" =~ $bond_nic\:unmanaged ]]; then
whiptail \
--title "Security Onion Setup" \
--msgbox "$bond_nic is unmanaged by Network Manager. Please remove it from other network management tools then re-run setup." \
8 75
exit
fi
done
} }
whiptail_bond_nics_mtu() {
@@ -186,6 +118,13 @@ whiptail_check_exitstatus() {
esac
}
whiptail_components_adv_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 8 75
}
whiptail_create_admin_user() {
[ -n "$TESTING" ] && return
@@ -293,13 +232,6 @@ whiptail_create_web_user() {
whiptail_check_exitstatus $exitstatus
}
whiptail_invalid_user_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Please enter a valid email address." 8 75
}
whiptail_create_web_user_password1() {
[ -n "$TESTING" ] && return
@@ -344,7 +276,7 @@ whiptail_requirements_error() {
if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') == 'nics' ]]; then if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') == 'nics' ]]; then
whiptail --title "Security Onion Setup" \ whiptail --title "Security Onion Setup" \
--msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press OK to exit setup and reconfigure the machine." 10 75 --msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select OK to exit setup and reconfigure the machine." 10 75
# Same as whiptail_cancel, but changed the wording to exit instead of cancel. # Same as whiptail_cancel, but changed the wording to exit instead of cancel.
whiptail --title "Security Onion Setup" --msgbox "Exiting Setup. No changes have been made." 8 75 whiptail --title "Security Onion Setup" --msgbox "Exiting Setup. No changes have been made." 8 75
@@ -358,7 +290,7 @@ whiptail_requirements_error() {
exit
else
whiptail --title "Security Onion Setup" \
--yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select YES to continue anyway, or select NO to cancel." 10 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -379,7 +311,7 @@ whiptail_storage_requirements() {
Visit https://docs.securityonion.net/en/2.1/hardware.html for more information.

Select YES to continue anyway, or select NO to cancel.
EOM

whiptail \
@@ -440,7 +372,7 @@ whiptail_dhcp_warn() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
if [[ $setup_type == "iso" ]]; then if [[ $setup_type == "iso" ]]; then
local interaction_text="Press YES to keep DHCP or NO to go back." local interaction_text="Select YES to keep DHCP or NO to go back."
local window_type="yesno" local window_type="yesno"
else else
local interaction_text="Press ENTER to continue." local interaction_text="Press ENTER to continue."
@@ -494,7 +426,7 @@ whiptail_dockernet_net() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
DOCKERNET=$(whiptail --title "Security Onion Setup" --inputbox \ DOCKERNET=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter a /24 network range for docker to use: \nThe same range MUST be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3) "\nEnter a /24 size network range for docker to use WITHOUT the /24 notation: \nThis range will be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3)
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus
@@ -545,11 +477,26 @@ whiptail_eval_adv() {
whiptail_check_exitstatus $exitstatus
}

whiptail_fleet_custom_hostname() {
[ -n "$TESTING" ] && return
FLEETCUSTOMHOSTNAME=$(whiptail --title "Security Onion Install" --inputbox \
"What FQDN should osquery clients use for connections to this Fleet node? Leave blank if the local system hostname will be used." 10 60 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_gauge_post_setup() {
if [ -n "$TESTING" ]; then
cat >> $setup_log 2>&1
else
local msg=$1
whiptail --title "Security Onion Setup" --gauge "$msg" 6 60 96
fi
}
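whiptail's --gauge widget reads whole-number percentages from stdin and keeps redrawing until stdin closes, which is why this function can sit at the tail of a pipeline; for example:

```
# Feed percentages to the gauge; it starts at 96 and climbs to 100.
for pct in 97 98 99 100; do
    sleep 1
    echo "$pct"
done | whiptail_gauge_post_setup "Finishing post-setup tasks..."
```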
whiptail_helix_apikey() {
@@ -676,6 +623,27 @@ whiptail_install_type_other() {
export install_type
}
whiptail_invalid_pass_characters_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." 8 75
}
whiptail_invalid_pass_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Please choose a more secure password." 8 75
}
whiptail_invalid_user_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Please enter a valid email address." 8 75
}
whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
@@ -690,6 +658,17 @@ whiptail_log_size_limit() {
}
whiptail_make_changes() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as a $install_type. Please press YES to make changes or NO to cancel." 8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_management_interface_dns() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
@@ -697,6 +676,8 @@ whiptail_management_interface_dns() {
MDNS=$(whiptail --title "Security Onion Setup" --inputbox \ MDNS=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your DNS servers separated by a space:" 10 60 8.8.8.8 8.8.4.4 3>&1 1>&2 2>&3) "Enter your DNS servers separated by a space:" 10 60 8.8.8.8 8.8.4.4 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_interface_dns_search() { whiptail_management_interface_dns_search() {
@@ -706,6 +687,8 @@ whiptail_management_interface_dns_search() {
MSEARCH=$(whiptail --title "Security Onion Setup" --inputbox \ MSEARCH=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your DNS search domain:" 10 60 searchdomain.local 3>&1 1>&2 2>&3) "Enter your DNS search domain:" 10 60 searchdomain.local 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_interface_gateway() { whiptail_management_interface_gateway() {
@@ -715,6 +698,8 @@ whiptail_management_interface_gateway() {
MGATEWAY=$(whiptail --title "Security Onion Setup" --inputbox \ MGATEWAY=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your gateway:" 10 60 X.X.X.X 3>&1 1>&2 2>&3) "Enter your gateway:" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_interface_ip() { whiptail_management_interface_ip() {
@@ -724,6 +709,8 @@ whiptail_management_interface_ip() {
MIP=$(whiptail --title "Security Onion Setup" --inputbox \ MIP=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your IP address:" 10 60 X.X.X.X 3>&1 1>&2 2>&3) "Enter your IP address:" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_interface_mask() { whiptail_management_interface_mask() {
@@ -733,6 +720,8 @@ whiptail_management_interface_mask() {
MMASK=$(whiptail --title "Security Onion Setup" --inputbox \ MMASK=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the bit mask for your subnet:" 10 60 24 3>&1 1>&2 2>&3) "Enter the bit mask for your subnet:" 10 60 24 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_nic() { whiptail_management_nic() {
@@ -754,42 +743,43 @@ whiptail_management_nic() {
} }
whiptail_management_interface_setup() {
[ -n "$TESTING" ] && return
local minion_msg
local msg
local line_count
if [[ $is_minion ]]; then
line_count=11
minion_msg="copy the ssh key for soremote to the manager. This will bring you to the command line temporarily to accept the manager's ECDSA certificate and enter the password for soremote"
else
line_count=9
minion_msg=""
fi
if [[ $is_iso ]]; then
if [[ $minion_msg != "" ]]; then
msg="initialize networking and $minion_msg"
else
msg="initialize networking"
fi
else
msg=$minion_msg
fi
read -r -d '' message <<- EOM
Setup will now $msg.
Select OK to continue.
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" $line_count 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_oinkcode() {
[ -n "$TESTING" ] && return
OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your ET Pro or oinkcode:" 10 75 XXXXXXX 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_make_changes() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as a $install_type. Please press YES to make changes or NO to cancel." 8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_management_server() {
@@ -840,6 +830,30 @@ whiptail_manager_adv() {
}
# Ask if you want to do true clustering
whiptail_manager_adv_escluster(){
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno \
"Do you want to set up a traditional ES cluster for using replicas and/or Hot-Warm indices? Recommended only for those who have experience with ES clustering! " 12 75
local exitstatus=$?
if [[ $exitstatus == 0 ]]; then
whiptail_manager_adv_escluster_name
fi
}
# Get a cluster name
whiptail_manager_adv_escluster_name(){
[ -n "$TESTING" ] && return
ESCLUSTERNAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter a name for your ES cluster!" 10 75 securityonion 3>&1 1>&2 2>&3)
}
# Ask which additional components to install
whiptail_manager_adv_service_zeeklogs() {
@@ -894,11 +908,59 @@ whiptail_manager_adv_service_zeeklogs() {
}
whiptail_manager_updates() {
[ -n "$TESTING" ] && return
local update_string
update_string=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?" 20 75 4 \
"MANAGER" "Manager node is proxy for updates" ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
case "$update_string" in
'MANAGER')
export MANAGERUPDATES='1'
;;
*)
export MANAGERUPDATES='0'
;;
esac
}
whiptail_manager_updates_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup"\
--msgbox "Updating through the manager node requires the manager to have internet access, press ENTER to continue."\
8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_nids() {
[ -n "$TESTING" ] && return
NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose which IDS to run: \n\n(Snort 3.0 support will be added once it is out of beta.)" 25 75 4 \
"Suricata" "Suricata" ON \
"Snort" "Placeholder for Snort 3.0 " OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_network_notice() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Select YES to continue." 8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -979,6 +1041,18 @@ whiptail_node_ls_input_threads() {
}
whiptail_oinkcode() {
[ -n "$TESTING" ] && return
OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your ET Pro or oinkcode:" 10 75 XXXXXXX 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
#TODO: helper function to display error message or exit if batch mode
# exit_if_batch <"Error string"> <Error code (int)>
@@ -1133,6 +1207,21 @@ whiptail_patch_schedule_select_hours() {
}
whiptail_requirements_error() {
local requirement_needed=$1
local current_val=$2
local needed_val=$3
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" \
--yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press YES to continue anyway, or press NO to cancel." 10 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_rule_setup() {
[ -n "$TESTING" ] && return
@@ -1164,6 +1253,46 @@ whiptail_sensor_config() {
}
whiptail_sensor_nics() {
[ -n "$TESTING" ] && return
filter_unused_nics
if [[ $is_ec2 ]]; then
local menu_text="Please select NIC for the Monitor Interface:"
local list_type="radiolist"
else
local menu_text="Please add NICs to the Monitor Interface:"
local list_type="checklist"
fi
BNICS=$(whiptail --title "NIC Setup" --$list_type "$menu_text" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [ -z "$BNICS" ]
do
BNICS=$(whiptail --title "NIC Setup" --$list_type "$menu_text" 20 75 12 "${nic_list[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
BNICS=$(echo "$BNICS" | tr -d '"')
IFS=' ' read -ra BNICS <<< "$BNICS"
for bond_nic in "${BNICS[@]}"; do
if [[ "${nmcli_dev_status_list}" =~ $bond_nic\:unmanaged ]]; then
whiptail \
--title "Security Onion Setup" \
--msgbox "$bond_nic is unmanaged by Network Manager. Please remove it from other network management tools then re-run setup." \
8 75
exit
fi
done
}
whiptail_set_hostname() {
[ -n "$TESTING" ] && return
@@ -1248,7 +1377,20 @@ whiptail_setup_failed() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to exit." 8 75 local check_err_msg
local height
[ -f "$error_log" ] && check_err_msg="A summary of errors can be found in $error_log.\n"
if [[ -n $check_err_msg ]]; then height=11; else height=10; fi
read -r -d '' message <<- EOM
Install had a problem. Please see $setup_log for details.\n
$check_err_msg
Press Ok to exit.
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" $height 75
} }
whiptail_shard_count() {
@@ -1284,15 +1426,30 @@ whiptail_so_allow() {
fi
}
whiptail_storage_requirements() {
local mount=$1
local current_val=$2
local needed_val=$3

[ -n "$TESTING" ] && return

read -r -d '' message <<- EOM
Free space on mount point '${mount}' is currently ${current_val}.
You need ${needed_val} to meet minimum requirements.

Visit https://docs.securityonion.net/en/2.1/hardware.html for more information.

Press YES to continue anyway, or press NO to cancel.
EOM

whiptail \
--title "Security Onion Setup" \
--yesno "$message" \
14 75

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
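All of these prompts funnel their result through whiptail_check_exitstatus; whiptail itself exits 0 for Yes/OK, 1 for No/Cancel, and 255 when Esc is pressed:

```
if whiptail --title "Security Onion Setup" --yesno "Continue?" 8 40; then
    echo "user selected YES"                  # exit status 0
else
    echo "user selected NO or pressed Esc"    # exit status 1 or 255
fi
```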
whiptail_strelka_rules() {
@@ -1314,11 +1471,11 @@ whiptail_suricata_pins() {
readarray -t filtered_core_list <<< "$(echo "${cpu_core_list[@]}" "${ZEEKPINS[@]}" | xargs -n1 | sort | uniq -u | awk '{print $1}')"
local filtered_core_str=()
for item in "${filtered_core_list[@]}"; do
filtered_core_str+=("$item" "")
done
if [[ $is_node && $is_sensor && ! $is_eval ]]; then
local PROCS=$(expr $lb_procs / 2)
if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
else
@@ -1335,40 +1492,6 @@ whiptail_suricata_pins() {
}
whiptail_manager_updates() {
[ -n "$TESTING" ] && return
local update_string
update_string=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?" 20 75 4 \
"MANAGER" "Manager node is proxy for updates" ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
case "$update_string" in
'MANAGER')
export MANAGERUPDATES='1'
;;
*)
export MANAGERUPDATES='0'
;;
esac
}
whiptail_manager_updates_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup"\
--msgbox "Updating through the manager node requires the manager to have internet access, press ENTER to continue."\
8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_node_updates() {
[ -n "$TESTING" ] && return
@@ -1406,3 +1529,40 @@ whiptail_you_sure() {
return $exitstatus
}
whiptail_zeek_pins() {
[ -n "$TESTING" ] && return
local cpu_core_list_whiptail=()
for item in "${cpu_core_list[@]}"; do
cpu_core_list_whiptail+=("$item" "OFF")
done
if [[ $is_smooshed ]]; then
local PROCS=$(expr $lb_procs / 2)
if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
else
local PROCS=$lb_procs
fi
ZEEKPINS=$(whiptail --noitem --title "Pin Zeek CPUS" --checklist "Please select $PROCS cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
ZEEKPINS=$(echo "$ZEEKPINS" | tr -d '"')
IFS=' ' read -ra ZEEKPINS <<< "$ZEEKPINS"
}
whiptail_zeek_version() {
[ -n "$TESTING" ] && return
ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 "ZEEK" "Zeek (formerly known as Bro)" ON \
"SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

Binary file not shown.