diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000..e02405f16 --- /dev/null +++ b/.github/ISSUE_TEMPLATE @@ -0,0 +1,12 @@ +PLEASE STOP AND READ THIS INFORMATION! + +If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum instead: +https://securityonion.net/discuss + +If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum to start a conversation about it instead of creating an issue. + +If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following: +- duplicated the issue on a fresh installation of the latest version +- provide information about your system and how you installed Security Onion +- include relevant log files +- include reproduction steps diff --git a/.github/workflows/leaktest.yml b/.github/workflows/leaktest.yml new file mode 100644 index 000000000..e66a06fa8 --- /dev/null +++ b/.github/workflows/leaktest.yml @@ -0,0 +1,15 @@ +name: leak-test + +on: [push,pull_request] + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + + - name: Gitleaks + uses: zricethezav/gitleaks-action@master diff --git a/KEYS b/KEYS index 4844a1d94..15be14ca9 100644 --- a/KEYS +++ b/KEYS @@ -1,4 +1,5 @@ -----BEGIN PGP PUBLIC KEY BLOCK----- + mQINBF7rzwEBEADBg87uJhnC3Ls7s60hbHGaywGrPtbz2WuYA/ev3YS3X7WS75p8 PGlzTWUCujx0pEHbK2vYfExl3zksZ8ZmLyZ9VB3oSLiWBzJgKAeB7YCFEo8te+eE P2Z+8c+kX4eOV+2waxZyewA2TipSkhWgStSI4Ow8SyVUcUWA3hCw7mo2duNVi7KO diff --git a/README.md b/README.md index 38e1d64dd..7a2d2e4a2 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,14 @@ -## Security Onion 2.3.1 +## Security Onion 2.3.10 -Security Onion 2.3.1 is here! +Security Onion 2.3.10 is here! 
+## Screenshots + +Alerts +![Alerts](https://raw.githubusercontent.com/security-onion-solutions/securityonion/master/screenshots/alerts-1.png) + +Hunt +![Hunt](https://raw.githubusercontent.com/security-onion-solutions/securityonion/master/screenshots/hunt-1.png) ### Release Notes diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index 26b926971..ed450a342 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -1,16 +1,16 @@ -### 2.3.1 ISO image built on 2020/10/22 +### 2.3.10 ISO image built on 2020/11/19 ### Download and Verify -2.3.1 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.3.1.iso +2.3.10 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.3.10.iso -MD5: EF2DEBCCBAE0B0BCCC906552B5FF918A -SHA1: 16AFCACB102BD217A038044D64E7A86DA351640E -SHA256: 7125F90B6323179D0D29F5745681BE995BD2615E64FA1E0046D94888A72C539E +MD5: 55E10BAE3D90DF47CA4D5DCCDCB67A96 +SHA1: 01361123F35CEACE077803BC8074594D57EE653A +SHA256: 772EA4EFFFF12F026593F5D1CC93DB538CC17B9BA5F60308F1976B6ED7032A8D Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.1.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.10.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS @@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.1.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.10.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.3.1.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.3.10.iso ``` Verify the downloaded ISO image using the signature file: 
``` -gpg --verify securityonion-2.3.1.iso.sig securityonion-2.3.1.iso +gpg --verify securityonion-2.3.10.iso.sig securityonion-2.3.10.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Thu 22 Oct 2020 10:34:27 AM EDT using RSA key ID FE507013 +gpg: Signature made Thu 19 Nov 2020 03:38:54 PM EST using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. diff --git a/VERSION b/VERSION index 9fa5f12ab..69484413e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3.10 +2.3.20 \ No newline at end of file diff --git a/salt/_modules/so.py b/salt/_modules/so.py index 50c29902f..e75c90ec8 100644 --- a/salt/_modules/so.py +++ b/salt/_modules/so.py @@ -1,4 +1,51 @@ #!py +import logging + def status(): - return __salt__['cmd.run']('/usr/sbin/so-status') \ No newline at end of file + return __salt__['cmd.run']('/usr/sbin/so-status') + + +def mysql_conn(retry): + log = logging.getLogger(__name__) + + from time import sleep + + try: + from MySQLdb import _mysql + except ImportError as e: + log.error(e) + return False + + mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint')) + mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0] + + mysql_up = False + for i in range(0, retry): + log.debug(f'Connection attempt {i+1}') + try: + db = _mysql.connect( + host=mainip, + user='root', + passwd=__salt__['pillar.get']('secrets:mysql') + ) + log.debug(f'Connected to MySQL server on {mainip} after {i} attempts.') + + db.query("""SELECT 1;""") + log.debug(f'Successfully completed query against MySQL server on {mainip}') + + db.close() + mysql_up = True + break + except _mysql.OperationalError as e: + log.debug(e) + except Exception as e: + log.error('Unexpected error occurred.') + log.error(e) + break + 
sleep(1) + + if not mysql_up: + log.error(f'Could not connect to MySQL server on {mainip} after {retry} attempts.') + + return mysql_up \ No newline at end of file diff --git a/salt/common/files/log-rotate.conf b/salt/common/files/log-rotate.conf index d383981cd..8f1df0307 100644 --- a/salt/common/files/log-rotate.conf +++ b/salt/common/files/log-rotate.conf @@ -18,6 +18,7 @@ /opt/so/log/filebeat/*.log /opt/so/log/telegraf/*.log /opt/so/log/redis/*.log +/opt/so/log/salt/so-salt-minion-check { {{ logrotate_conf | indent(width=4) }} } diff --git a/salt/common/init.sls b/salt/common/init.sls index 769484ef3..cf791cfa2 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -32,6 +32,18 @@ soconfperms: - gid: 939 - dir_mode: 770 +sostatusconf: + file.directory: + - name: /opt/so/conf/so-status + - uid: 939 + - gid: 939 + - dir_mode: 770 + +so-status.conf: + file.touch: + - name: /opt/so/conf/so-status/so-status.conf + - unless: ls /opt/so/conf/so-status/so-status.conf + sosaltstackperms: file.directory: - name: /opt/so/saltstack @@ -158,8 +170,8 @@ Etc/UTC: utilsyncscripts: file.recurse: - name: /usr/sbin - - user: 0 - - group: 0 + - user: root + - group: root - file_mode: 755 - template: jinja - source: salt://common/tools/sbin diff --git a/salt/common/maps/domainstats.map.jinja b/salt/common/maps/domainstats.map.jinja deleted file mode 100644 index 221dcde03..000000000 --- a/salt/common/maps/domainstats.map.jinja +++ /dev/null @@ -1,5 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-domainstats' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/eval.map.jinja b/salt/common/maps/eval.map.jinja deleted file mode 100644 index 075344e82..000000000 --- a/salt/common/maps/eval.map.jinja +++ /dev/null @@ -1,20 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-filebeat', - 'so-nginx', - 'so-telegraf', - 'so-dockerregistry', - 'so-soc', - 'so-kratos', - 'so-idstools', - 'so-elasticsearch', - 'so-kibana', - 'so-steno', - 'so-suricata', - 'so-zeek', 
- 'so-curator', - 'so-elastalert', - 'so-soctopus', - 'so-sensoroni' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/fleet.map.jinja b/salt/common/maps/fleet.map.jinja deleted file mode 100644 index c55223125..000000000 --- a/salt/common/maps/fleet.map.jinja +++ /dev/null @@ -1,10 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-mysql', - 'so-fleet', - 'so-redis', - 'so-filebeat', - 'so-nginx', - 'so-telegraf' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/fleet_manager.map.jinja b/salt/common/maps/fleet_manager.map.jinja deleted file mode 100644 index 91850846c..000000000 --- a/salt/common/maps/fleet_manager.map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-mysql', - 'so-fleet', - 'so-redis' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/freq.map.jinja b/salt/common/maps/freq.map.jinja deleted file mode 100644 index d3f692484..000000000 --- a/salt/common/maps/freq.map.jinja +++ /dev/null @@ -1,5 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-freqserver' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/grafana.map.jinja b/salt/common/maps/grafana.map.jinja deleted file mode 100644 index 1118a50fe..000000000 --- a/salt/common/maps/grafana.map.jinja +++ /dev/null @@ -1,6 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-influxdb', - 'so-grafana' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/heavynode.map.jinja b/salt/common/maps/heavynode.map.jinja deleted file mode 100644 index cbd0fc3b0..000000000 --- a/salt/common/maps/heavynode.map.jinja +++ /dev/null @@ -1,15 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-redis', - 'so-logstash', - 'so-elasticsearch', - 'so-curator', - 'so-steno', - 'so-suricata', - 'so-wazuh', - 'so-filebeat', - 'so-sensoroni' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/helixsensor.map.jinja b/salt/common/maps/helixsensor.map.jinja deleted file 
mode 100644 index 84866de3a..000000000 --- a/salt/common/maps/helixsensor.map.jinja +++ /dev/null @@ -1,12 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-idstools', - 'so-steno', - 'so-zeek', - 'so-redis', - 'so-logstash', - 'so-filebeat - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/hotnode.map.jinja b/salt/common/maps/hotnode.map.jinja deleted file mode 100644 index bc9d58360..000000000 --- a/salt/common/maps/hotnode.map.jinja +++ /dev/null @@ -1,9 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-logstash', - 'so-elasticsearch', - 'so-curator', - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/import.map.jinja b/salt/common/maps/import.map.jinja deleted file mode 100644 index 324536d11..000000000 --- a/salt/common/maps/import.map.jinja +++ /dev/null @@ -1,10 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-filebeat', - 'so-nginx', - 'so-soc', - 'so-kratos', - 'so-elasticsearch', - 'so-kibana' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/manager.map.jinja b/salt/common/maps/manager.map.jinja deleted file mode 100644 index 45358d017..000000000 --- a/salt/common/maps/manager.map.jinja +++ /dev/null @@ -1,21 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-dockerregistry', - 'so-nginx', - 'so-telegraf', - 'so-soc', - 'so-kratos', - 'so-idstools', - 'so-redis', - 'so-elasticsearch', - 'so-logstash', - 'so-kibana', - 'so-elastalert', - 'so-filebeat', - 'so-soctopus' - ] -} %} - -{% if salt['pillar.get']('global:managerupdate') == 1 %} - {% do docker.containers.append('so-aptcacherng') %} -{% endif %} \ No newline at end of file diff --git a/salt/common/maps/managersearch.map.jinja b/salt/common/maps/managersearch.map.jinja deleted file mode 100644 index 66c5afd43..000000000 --- a/salt/common/maps/managersearch.map.jinja +++ /dev/null @@ -1,21 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-soc', - 
'so-kratos', - 'so-idstools', - 'so-redis', - 'so-logstash', - 'so-elasticsearch', - 'so-curator', - 'so-kibana', - 'so-elastalert', - 'so-filebeat', - 'so-soctopus' - ] -} %} - -{% if salt['pillar.get']('global:managerupdate') == 1 %} - {% do docker.containers.append('so-aptcacherng') %} -{% endif %} \ No newline at end of file diff --git a/salt/common/maps/mdengine.map.jinja b/salt/common/maps/mdengine.map.jinja deleted file mode 100644 index 881e3ec4f..000000000 --- a/salt/common/maps/mdengine.map.jinja +++ /dev/null @@ -1,5 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-zeek' - ] -} %} diff --git a/salt/common/maps/playbook.map.jinja b/salt/common/maps/playbook.map.jinja deleted file mode 100644 index 84baa8dec..000000000 --- a/salt/common/maps/playbook.map.jinja +++ /dev/null @@ -1,5 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-playbook' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/searchnode.map.jinja b/salt/common/maps/searchnode.map.jinja deleted file mode 100644 index b46652742..000000000 --- a/salt/common/maps/searchnode.map.jinja +++ /dev/null @@ -1,10 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-logstash', - 'so-elasticsearch', - 'so-curator', - 'so-filebeat' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/sensor.map.jinja b/salt/common/maps/sensor.map.jinja deleted file mode 100644 index 3f5ebe8eb..000000000 --- a/salt/common/maps/sensor.map.jinja +++ /dev/null @@ -1,9 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-telegraf', - 'so-steno', - 'so-suricata', - 'so-filebeat', - 'so-sensoroni' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/so-status.map.jinja b/salt/common/maps/so-status.map.jinja deleted file mode 100644 index 12bddfec7..000000000 --- a/salt/common/maps/so-status.map.jinja +++ /dev/null @@ -1,48 +0,0 @@ -{% set role = grains.id.split('_') | last %} -{% from 'common/maps/'~ role ~'.map.jinja' import docker with context %} - -# 
Check if the service is enabled and append it's required containers -# to the list predefined by the role / minion id affix -{% macro append_containers(pillar_name, k, compare )%} - {% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %} - {% if k == 'enabled' %} - {% set k = pillar_name %} - {% endif %} - {% from 'common/maps/'~k~'.map.jinja' import docker as d with context %} - {% for li in d['containers'] %} - {{ docker['containers'].append(li) }} - {% endfor %} - {% endif %} -{% endmacro %} - -{% set docker = salt['grains.filter_by']({ - '*_'~role: { - 'containers': docker['containers'] - } -},grain='id', merge=salt['pillar.get']('docker')) %} - -{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} - {{ append_containers('manager', 'grafana', 0) }} - {{ append_containers('global', 'fleet_manager', 0) }} - {{ append_containers('global', 'wazuh', 0) }} - {{ append_containers('manager', 'thehive', 0) }} - {{ append_containers('manager', 'playbook', 0) }} - {{ append_containers('manager', 'freq', 0) }} - {{ append_containers('manager', 'domainstats', 0) }} -{% endif %} - -{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %} - {{ append_containers('strelka', 'enabled', 0) }} -{% endif %} - -{% if role in ['heavynode', 'standalone'] %} - {{ append_containers('global', 'mdengine', 'SURICATA') }} -{% endif %} - -{% if role == 'searchnode' %} - {{ append_containers('manager', 'wazuh', 0) }} -{% endif %} - -{% if role == 'sensor' %} - {{ append_containers('global', 'mdengine', 'SURICATA') }} -{% endif %} \ No newline at end of file diff --git a/salt/common/maps/standalone.map.jinja b/salt/common/maps/standalone.map.jinja deleted file mode 100644 index ae3177f4b..000000000 --- a/salt/common/maps/standalone.map.jinja +++ /dev/null @@ -1,25 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-soc', - 'so-kratos', - 'so-idstools', - 'so-redis', - 'so-logstash', - 'so-elasticsearch', - 'so-curator', - 'so-kibana', - 
'so-elastalert', - 'so-filebeat', - 'so-suricata', - 'so-steno', - 'so-dockerregistry', - 'so-soctopus', - 'so-sensoroni' - ] -} %} - -{% if salt['pillar.get']('global:managerupdate') == 1 %} - {% do docker.containers.append('so-aptcacherng') %} -{% endif %} \ No newline at end of file diff --git a/salt/common/maps/strelka.map.jinja b/salt/common/maps/strelka.map.jinja deleted file mode 100644 index b26a1241b..000000000 --- a/salt/common/maps/strelka.map.jinja +++ /dev/null @@ -1,9 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-strelka-coordinator', - 'so-strelka-gatekeeper', - 'so-strelka-manager', - 'so-strelka-frontend', - 'so-strelka-filestream' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/thehive.map.jinja b/salt/common/maps/thehive.map.jinja deleted file mode 100644 index e4ca7d2a2..000000000 --- a/salt/common/maps/thehive.map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-thehive', - 'so-thehive-es', - 'so-cortex' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/warmnode.map.jinja b/salt/common/maps/warmnode.map.jinja deleted file mode 100644 index 08cf2dbb8..000000000 --- a/salt/common/maps/warmnode.map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-nginx', - 'so-telegraf', - 'so-elasticsearch' - ] -} %} \ No newline at end of file diff --git a/salt/common/maps/wazuh.map.jinja b/salt/common/maps/wazuh.map.jinja deleted file mode 100644 index 5217a79ee..000000000 --- a/salt/common/maps/wazuh.map.jinja +++ /dev/null @@ -1,5 +0,0 @@ -{% set docker = { - 'containers': [ - 'so-wazuh' - ] -} %} \ No newline at end of file diff --git a/salt/common/scripts/dockernet.sh b/salt/common/scripts/dockernet.sh deleted file mode 100755 index b317e4006..000000000 --- a/salt/common/scripts/dockernet.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -if [ ! 
-f /opt/so/state/dockernet.state ]; then - docker network create -d bridge so-elastic-net - touch /opt/so/state/dockernet.state -else - exit -fi diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index b1dd425f8..1dfa22a5f 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -15,12 +15,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -IMAGEREPO=securityonion - # Check for prerequisites if [ "$(id -u)" -ne 0 ]; then - echo "This script must be run using sudo!" - exit 1 + echo "This script must be run using sudo!" + exit 1 fi # Define a banner to separate sections @@ -31,14 +29,43 @@ header() { printf '%s\n' "$banner" "$*" "$banner" } +lookup_salt_value() { + key=$1 + group=$2 + kind=$3 + + if [ -z "$kind" ]; then + kind=pillar + fi + + if [ -n "$group" ]; then + group=${group}: + fi + + salt-call --no-color ${kind}.get ${group}${key} --out=newline_values_only +} + lookup_pillar() { - key=$1 - salt-call --no-color pillar.get global:${key} --out=newline_values_only + key=$1 + pillar=$2 + if [ -z "$pillar" ]; then + pillar=global + fi + lookup_salt_value "$key" "$pillar" "pillar" } lookup_pillar_secret() { - key=$1 - salt-call --no-color pillar.get secrets:${key} --out=newline_values_only + lookup_pillar "$1" "secrets" +} + +lookup_grain() { + lookup_salt_value "$1" "" "grains" +} + +lookup_role() { + id=$(lookup_grain id) + pieces=($(echo $id | tr '_' ' ')) + echo ${pieces[1]} } check_container() { @@ -47,7 +74,64 @@ check_container() { } check_password() { - local password=$1 - echo "$password" | egrep -v "'|\"|\\\\" > /dev/null 2>&1 - return $? -} \ No newline at end of file + local password=$1 + echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1 + return $? 
+} + +set_os() { + if [ -f /etc/redhat-release ]; then + OS=centos + else + OS=ubuntu + fi +} + +set_minionid() { + MINIONID=$(lookup_grain id) +} + +set_version() { + CURRENTVERSION=0.0.0 + if [ -f /etc/soversion ]; then + CURRENTVERSION=$(cat /etc/soversion) + fi + if [ -z "$VERSION" ]; then + if [ -z "$NEWVERSION" ]; then + if [ "$CURRENTVERSION" == "0.0.0" ]; then + echo "ERROR: Unable to detect Security Onion version; terminating script." + exit 1 + else + VERSION=$CURRENTVERSION + fi + else + VERSION="$NEWVERSION" + fi + fi +} + +require_manager() { + # Check to see if this is a manager + MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') + if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ] || [ $MANAGERCHECK == 'so-import' ]; then + echo "This is a manager, We can proceed." + else + echo "Please run this command on the manager; the manager controls the grid." + exit 1 + fi +} + +is_single_node_grid() { + role=$(lookup_role) + if [ "$role" != "eval" ] && [ "$role" != "standalone" ] && [ "$role" != "import" ]; then + return 1 + fi + return 0 +} + +fail() { + msg=$1 + echo "ERROR: $msg" + echo "Exiting." 
+ exit 1 +} diff --git a/salt/common/tools/sbin/so-cortex-user-add b/salt/common/tools/sbin/so-cortex-user-add index 728ad25f1..1fdada70d 100755 --- a/salt/common/tools/sbin/so-cortex-user-add +++ b/salt/common/tools/sbin/so-cortex-user-add @@ -31,7 +31,7 @@ fi USER=$1 CORTEX_KEY=$(lookup_pillar cortexkey) -CORTEX_IP=$(lookup_pillar managerip) +CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api" CORTEX_ORG_NAME=$(lookup_pillar cortexorgname) CORTEX_USER=$USER @@ -43,7 +43,7 @@ fi read -rs CORTEX_PASS # Create new user in Cortex -resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }") +resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }") if [[ "$resp" =~ \"status\":\"Ok\" ]]; then echo "Successfully added user to Cortex." 
else diff --git a/salt/common/tools/sbin/so-cortex-user-enable b/salt/common/tools/sbin/so-cortex-user-enable index cbfdceb25..c67b358b2 100755 --- a/salt/common/tools/sbin/so-cortex-user-enable +++ b/salt/common/tools/sbin/so-cortex-user-enable @@ -31,7 +31,7 @@ fi USER=$1 CORTEX_KEY=$(lookup_pillar cortexkey) -CORTEX_IP=$(lookup_pillar managerip) +CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api" CORTEX_USER=$USER case "${2^^}" in @@ -46,7 +46,7 @@ case "${2^^}" in ;; esac -resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }") +resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }") if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then echo "Successfully updated user in Cortex." else diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh index 770d9f241..452bc3121 100755 --- a/salt/common/tools/sbin/so-docker-refresh +++ b/salt/common/tools/sbin/so-docker-refresh @@ -16,96 +16,7 @@ # along with this program. If not, see . . /usr/sbin/so-common +. /usr/sbin/so-image-common -manager_check() { - # Check to see if this is a manager - MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then - echo "This is a manager. We can proceed" - else - echo "Please run soup on the manager. The manager controls all updates." 
- exit 1 - fi -} - -update_docker_containers() { - - # Download the containers from the interwebs - for i in "${TRUSTED_CONTAINERS[@]}" - do - # Pull down the trusted docker image - echo "Downloading $i" - docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i - # Tag it with the new registry destination - docker tag $IMAGEREPO/$i $HOSTNAME:5000/$IMAGEREPO/$i - docker push $HOSTNAME:5000/$IMAGEREPO/$i - done - -} - -version_check() { - if [ -f /etc/soversion ]; then - VERSION=$(cat /etc/soversion) - else - echo "Unable to detect version. I will now terminate." - exit 1 - fi -} - -manager_check -version_check - -# Use the hostname -HOSTNAME=$(hostname) -# List all the containers -if [ $MANAGERCHECK != 'so-helix' ]; then - TRUSTED_CONTAINERS=( \ - "so-acng:$VERSION" \ - "so-thehive-cortex:$VERSION" \ - "so-curator:$VERSION" \ - "so-domainstats:$VERSION" \ - "so-elastalert:$VERSION" \ - "so-elasticsearch:$VERSION" \ - "so-filebeat:$VERSION" \ - "so-fleet:$VERSION" \ - "so-fleet-launcher:$VERSION" \ - "so-freqserver:$VERSION" \ - "so-grafana:$VERSION" \ - "so-idstools:$VERSION" \ - "so-influxdb:$VERSION" \ - "so-kibana:$VERSION" \ - "so-kratos:$VERSION" \ - "so-logstash:$VERSION" \ - "so-minio:$VERSION" \ - "so-mysql:$VERSION" \ - "so-nginx:$VERSION" \ - "so-pcaptools:$VERSION" \ - "so-playbook:$VERSION" \ - "so-redis:$VERSION" \ - "so-soc:$VERSION" \ - "so-soctopus:$VERSION" \ - "so-steno:$VERSION" \ - "so-strelka-frontend:$VERSION" \ - "so-strelka-manager:$VERSION" \ - "so-strelka-backend:$VERSION" \ - "so-strelka-filestream:$VERSION" \ - "so-suricata:$VERSION" \ - "so-telegraf:$VERSION" \ - "so-thehive:$VERSION" \ - "so-thehive-es:$VERSION" \ - "so-wazuh:$VERSION" \ - "so-zeek:$VERSION" ) - else - TRUSTED_CONTAINERS=( \ - "so-filebeat:$VERSION" \ - "so-idstools:$VERSION" \ - "so-logstash:$VERSION" \ - "so-nginx:$VERSION" \ - "so-redis:$VERSION" \ - "so-steno:$VERSION" \ - "so-suricata:$VERSION" \ - "so-telegraf:$VERSION" \ - "so-zeek:$VERSION" ) - fi - 
-update_docker_containers +require_manager +update_docker_containers "refresh" diff --git a/salt/common/tools/sbin/so-elastalert-test b/salt/common/tools/sbin/so-elastalert-test index e72d928ed..4e59aacb3 100755 --- a/salt/common/tools/sbin/so-elastalert-test +++ b/salt/common/tools/sbin/so-elastalert-test @@ -19,8 +19,7 @@ # # Purpose: This script will allow you to test your elastalert rule without entering the Docker container. -. /usr/sbin/so-elastic-common - +HOST_RULE_DIR=/opt/so/rules/elastalert OPTIONS="" SKIP=0 RESULTS_TO_LOG="n" @@ -29,114 +28,109 @@ FILE_SAVE_LOCATION="" usage() { -cat < Write results to specified log file - -o '' Specify Elastalert options ( Ex. --schema-only , --count-only, --days N ) - -r Specify path/name of rule to test + -h This message + -a Trigger real alerts instead of the debug alert + -l Write results to specified log file + -o '' Specify Elastalert options ( Ex. --schema-only , --count-only, --days N ) + -r Specify filename of rule to test (must exist in $HOST_RULE_DIR; do not include path) EOF } while getopts "hal:o:r:" OPTION do - case $OPTION in - h) - usage - exit 0 - ;; - a) - OPTIONS="--alert" - ;; - l) - RESULTS_TO_LOG="y" - FILE_SAVE_LOCATION=$OPTARG - ;; - - o) - OPTIONS=$OPTARG - ;; - - r) - RULE_NAME=$OPTARG - SKIP=1 - ;; - *) - usage - exit 0 - ;; - esac + case $OPTION in + h) + usage + exit 0 + ;; + a) + OPTIONS="--alert" + ;; + l) + RESULTS_TO_LOG="y" + FILE_SAVE_LOCATION=$OPTARG + ;; + o) + OPTIONS=$OPTARG + ;; + r) + RULE_NAME=$OPTARG + SKIP=1 + ;; + *) + usage + exit 0 + ;; + esac done docker_exec(){ - if [ ${RESULTS_TO_LOG,,} = "y" ] ; then - docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS" > $FILE_SAVE_LOCATION + CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/config/elastalert_config.yaml $OPTIONS" + if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then + $CMD > "$FILE_SAVE_LOCATION" else - docker exec -it so-elastalert bash -c 
"elastalert-test-rule $RULE_NAME $OPTIONS" + $CMD fi } rule_prompt(){ - CURRENT_RULES=$(find /opt/so/rules/elastalert -name "*.yaml") - echo - echo "This script will allow you to test an Elastalert rule." - echo - echo "Below is a list of active Elastalert rules:" - echo + CURRENT_RULES=$(cd "$HOST_RULE_DIR" && find . -type f \( -name "*.yaml" -o -name "*.yml" \) | sed -e 's/^\.\///') + if [ -z "$CURRENT_RULES" ]; then + echo "There are no rules available to test. Rule files must be placed in the $HOST_RULE_DIR directory." + exit 1 + fi + echo + echo "This script will allow you to test an Elastalert rule." + echo + echo "Below is a list of available Elastalert rules:" + echo echo "-----------------------------------" - echo - echo "$CURRENT_RULES" - echo + echo + echo "$CURRENT_RULES" + echo echo "-----------------------------------" - echo - echo "Note: To test a rule it must be accessible by the Elastalert Docker container." - echo - echo "Make sure to swap the local path (/opt/so/rules/elastalert/) for the docker path (/etc/elastalert/rules/)" - echo "Example: /opt/so/rules/elastalert/nids2hive.yaml would be /etc/elastalert/rules/nids2hive.yaml" - echo - while [ -z $RULE_NAME ]; do - echo "Please enter the file path and rule name you want to test." - read -e RULE_NAME + echo + while [ -z "$RULE_NAME" ]; do + read -p "Please enter the rule filename you want to test (filename only, no path): " -e RULE_NAME done } log_save_prompt(){ RESULTS_TO_LOG="" - while [ -z $RESULTS_TO_LOG ]; do - echo "The results can be rather long. Would you like to write the results to a file? (Y/N)" - read RESULTS_TO_LOG - done + read -p "The results can be rather long. Would you like to write the results to a file? (y/N) " -e RESULTS_TO_LOG } log_path_prompt(){ - while [ -z $FILE_SAVE_LOCATION ]; do - echo "Please enter the file path and file name." 
- read -e FILE_SAVE_LOCATION - done + while [ -z "$FILE_SAVE_LOCATION" ]; do + read -p "Please enter the log file path and file name: " -e FILE_SAVE_LOCATION + done echo "Depending on the rule this may take a while." } if [ $SKIP -eq 0 ]; then rule_prompt log_save_prompt - if [ ${RESULTS_TO_LOG,,} = "y" ] ; then - log_path_prompt - fi + if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then + log_path_prompt + fi fi +echo + docker_exec - -if [ $? -eq 0 ]; then - echo "Test completed successfully!" -else - echo "Something went wrong..." -fi +RESULT=$? echo +if [ $RESULT -eq 0 ]; then + echo "Test completed successfully!" +else + echo "Test failed." +fi - +echo \ No newline at end of file diff --git a/salt/common/tools/sbin/so-elastic-clear b/salt/common/tools/sbin/so-elastic-clear index 432e61c2b..941cc4538 100755 --- a/salt/common/tools/sbin/so-elastic-clear +++ b/salt/common/tools/sbin/so-elastic-clear @@ -51,9 +51,9 @@ if [ $SKIP -ne 1 ]; then # List indices echo {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -k https://{{ NODEIP }}:9200/_cat/indices?v + curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v {% else %} - curl {{ NODEIP }}:9200/_cat/indices?v + curl -L {{ NODEIP }}:9200/_cat/indices?v {% endif %} echo # Inform user we are about to delete all data @@ -94,16 +94,16 @@ fi echo "Deleting data..." 
{% if grains['role'] in ['so-node','so-heavynode'] %} -INDXS=$(curl -s -XGET -k https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }') +INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }') {% else %} -INDXS=$(curl -s -XGET {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }') +INDXS=$(curl -s -XGET -L {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }') {% endif %} for INDX in ${INDXS} do {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -XDELETE -k https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1 + curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1 {% else %} - curl -XDELETE "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1 + curl -XDELETE -L "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1 {% endif %} done diff --git a/salt/common/tools/sbin/so-elasticsearch-indices-rw b/salt/common/tools/sbin/so-elasticsearch-indices-rw index 6e9eebe47..dc9aee9d8 100755 --- a/salt/common/tools/sbin/so-elasticsearch-indices-rw +++ b/salt/common/tools/sbin/so-elasticsearch-indices-rw @@ -22,5 +22,5 @@ THEHIVEESPORT=9400 echo "Removing read only attributes for indices..." echo for p in $ESPORT $THEHIVEESPORT; do - curl -XPUT -H "Content-Type: application/json" http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi; + curl -XPUT -H "Content-Type: application/json" -L http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. 
Please ensure Elasticsearch is running.";fi; done diff --git a/salt/common/tools/sbin/so-elasticsearch-pipeline-stats b/salt/common/tools/sbin/so-elasticsearch-pipeline-stats index e1a0bfd3d..a4bc2e220 100755 --- a/salt/common/tools/sbin/so-elasticsearch-pipeline-stats +++ b/salt/common/tools/sbin/so-elasticsearch-pipeline-stats @@ -20,14 +20,14 @@ if [ "$1" == "" ]; then {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines" + curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines" {% else %} - curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines" + curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines" {% endif %} else {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\"" + curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\"" {% else %} - curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\"" + curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\"" {% endif %} fi diff --git a/salt/common/tools/sbin/so-elasticsearch-pipelines-list b/salt/common/tools/sbin/so-elasticsearch-pipelines-list index 58dbf9c9b..d1dda8dee 100755 --- a/salt/common/tools/sbin/so-elasticsearch-pipelines-list +++ b/salt/common/tools/sbin/so-elasticsearch-pipelines-list @@ -18,14 +18,14 @@ . 
/usr/sbin/so-common if [ "$1" == "" ]; then {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys' + curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys' {% else %} - curl -s {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys' + curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys' {% endif %} else {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq + curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq {% else %} - curl -s {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq + curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq {% endif %} fi diff --git a/salt/common/tools/sbin/so-elasticsearch-templates-list b/salt/common/tools/sbin/so-elasticsearch-templates-list index 85ef27760..a5850534e 100755 --- a/salt/common/tools/sbin/so-elasticsearch-templates-list +++ b/salt/common/tools/sbin/so-elasticsearch-templates-list @@ -18,14 +18,14 @@ . 
/usr/sbin/so-common if [ "$1" == "" ]; then {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_template/* | jq 'keys' + curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys' {% else %} - curl -s {{ NODEIP }}:9200/_template/* | jq 'keys' + curl -s -L {{ NODEIP }}:9200/_template/* | jq 'keys' {% endif %} else {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -s -k https://{{ NODEIP }}:9200/_template/$1 | jq + curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq {% else %} - curl -s {{ NODEIP }}:9200/_template/$1 | jq + curl -s -L {{ NODEIP }}:9200/_template/$1 | jq {% endif %} fi diff --git a/salt/common/tools/sbin/so-elasticsearch-templates-load b/salt/common/tools/sbin/so-elasticsearch-templates-load index 48558af34..76558e17a 100755 --- a/salt/common/tools/sbin/so-elasticsearch-templates-load +++ b/salt/common/tools/sbin/so-elasticsearch-templates-load @@ -31,9 +31,9 @@ COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" {% else %} - curl --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + curl --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" {% endif %} if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" @@ -56,9 +56,9 @@ cd ${ELASTICSEARCH_TEMPLATES} echo "Loading templates..." 
{% if grains['role'] in ['so-node','so-heavynode'] %} -for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done +for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done {% else %} -for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done +for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done {% endif %} echo diff --git a/salt/common/tools/sbin/so-features-enable b/salt/common/tools/sbin/so-features-enable index 0f2d694fe..d64f22dc2 100755 --- a/salt/common/tools/sbin/so-features-enable +++ b/salt/common/tools/sbin/so-features-enable @@ -15,6 +15,7 @@ # along with this program. If not, see . . /usr/sbin/so-common +. /usr/sbin/so-image-common local_salt_dir=/opt/so/saltstack/local cat << EOF @@ -39,34 +40,14 @@ fi echo "Please wait while switching to Elastic Features." -manager_check() { - # Check to see if this is a manager - MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then - echo "This is a manager. We can proceed" - else - echo "Please run so-features-enable on the manager." 
- exit 0 - fi -} +require_manager + +TRUSTED_CONTAINERS=( \ + "so-elasticsearch" \ + "so-filebeat" \ + "so-kibana" \ + "so-logstash" ) +update_docker_containers "features" "-features" -manager_check -VERSION=$(lookup_pillar soversion) # Modify global.sls to enable Features sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls -SUFFIX="-features" -TRUSTED_CONTAINERS=( \ - "so-elasticsearch:$VERSION$SUFFIX" \ - "so-filebeat:$VERSION$SUFFIX" \ - "so-kibana:$VERSION$SUFFIX" \ - "so-logstash:$VERSION$SUFFIX" ) - -for i in "${TRUSTED_CONTAINERS[@]}" -do - # Pull down the trusted docker image - echo "Downloading $i" - docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i - # Tag it with the new registry destination - docker tag $IMAGEREPO/$i $HOSTNAME:5000/$IMAGEREPO/$i - docker push $HOSTNAME:5000/$IMAGEREPO/$i -done diff --git a/salt/common/tools/sbin/so-fleet-user-add b/salt/common/tools/sbin/so-fleet-user-add index 5e2e91fe6..4230a1884 100755 --- a/salt/common/tools/sbin/so-fleet-user-add +++ b/salt/common/tools/sbin/so-fleet-user-add @@ -59,6 +59,6 @@ if [[ $? -eq 0 ]]; then echo "Successfully added user to Fleet" else echo "Unable to add user to Fleet; user might already exist" - echo $resp + echo "$MYSQL_OUTPUT" exit 2 fi \ No newline at end of file diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common new file mode 100755 index 000000000..3449158c0 --- /dev/null +++ b/salt/common/tools/sbin/so-image-common @@ -0,0 +1,175 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# NOTE: This script depends on so-common +IMAGEREPO=securityonion + +container_list() { + MANAGERCHECK=$1 + if [ -z "$MANAGERCHECK" ]; then + MANAGERCHECK=so-unknown + if [ -f /etc/salt/grains ]; then + MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') + fi + fi + + if [ $MANAGERCHECK == 'so-import' ]; then + TRUSTED_CONTAINERS=( \ + "so-elasticsearch" \ + "so-filebeat" \ + "so-idstools" \ + "so-kibana" \ + "so-kratos" \ + "so-nginx" \ + "so-pcaptools" \ + "so-soc" \ + "so-steno" \ + "so-suricata" \ + "so-zeek" ) + elif [ $MANAGERCHECK != 'so-helix' ]; then + TRUSTED_CONTAINERS=( \ + "so-acng" \ + "so-curator" \ + "so-domainstats" \ + "so-elastalert" \ + "so-elasticsearch" \ + "so-filebeat" \ + "so-fleet" \ + "so-fleet-launcher" \ + "so-freqserver" \ + "so-grafana" \ + "so-idstools" \ + "so-influxdb" \ + "so-kibana" \ + "so-kratos" \ + "so-logstash" \ + "so-minio" \ + "so-mysql" \ + "so-nginx" \ + "so-pcaptools" \ + "so-playbook" \ + "so-redis" \ + "so-soc" \ + "so-soctopus" \ + "so-steno" \ + "so-strelka-backend" \ + "so-strelka-filestream" \ + "so-strelka-frontend" \ + "so-strelka-manager" \ + "so-suricata" \ + "so-telegraf" \ + "so-thehive" \ + "so-thehive-cortex" \ + "so-thehive-es" \ + "so-wazuh" \ + "so-zeek" ) + else + TRUSTED_CONTAINERS=( \ + "so-filebeat" \ + "so-idstools" \ + "so-logstash" \ + "so-nginx" \ + "so-redis" \ + "so-steno" \ + "so-suricata" \ + "so-telegraf" \ + "so-zeek" ) + fi +} + +update_docker_containers() { + local CURLTYPE=$1 + local IMAGE_TAG_SUFFIX=$2 + local PROGRESS_CALLBACK=$3 + local LOG_FILE=$4 + + local CONTAINER_REGISTRY=quay.io + local 
SIGNPATH=/root/sosigs + + if [ -z "$CURLTYPE" ]; then + CURLTYPE=unknown + fi + + if [ -z "$LOG_FILE" ]; then + if [ -c /dev/tty ]; then + LOG_FILE=/dev/tty + else + LOG_FILE=/dev/null + fi + fi + + # Recheck the version for scenarios were the VERSION wasn't known before this script was imported + set_version + set_os + + if [ -z "$TRUSTED_CONTAINERS" ]; then + container_list + fi + + # Let's make sure we have the public key + curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS | gpg --import - >> "$LOG_FILE" 2>&1 + + rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1 + mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1 + + # Download the containers from the interwebs + for i in "${TRUSTED_CONTAINERS[@]}" + do + if [ -z "$PROGRESS_CALLBACK" ]; then + echo "Downloading $i" >> "$LOG_FILE" 2>&1 + else + $PROGRESS_CALLBACK $i + fi + + # Pull down the trusted docker image + local image=$i:$VERSION$IMAGE_TAG_SUFFIX + docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 + + # Get signature + curl -A "$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)" https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig >> "$LOG_FILE" 2>&1 + if [[ $? -ne 0 ]]; then + echo "Unable to pull signature file for $image" >> "$LOG_FILE" 2>&1 + exit 1 + fi + # Dump our hash values + DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$image) + + echo "$DOCKERINSPECT" | jq ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" > $SIGNPATH/$image.txt + echo "$DOCKERINSPECT" | jq ".[0].Created, .[0].RootFS.Layers" >> $SIGNPATH/$image.txt + + if [[ $? -ne 0 ]]; then + echo "Unable to inspect $image" >> "$LOG_FILE" 2>&1 + exit 1 + fi + GPGTEST=$(gpg --verify $SIGNPATH/$image.sig $SIGNPATH/$image.txt 2>&1) + if [[ $? 
-eq 0 ]]; then + if [[ -z "$SKIP_TAGPUSH" ]]; then + # Tag it with the new registry destination + if [ -z "$HOSTNAME" ]; then + HOSTNAME=$(hostname) + fi + docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 + docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 + fi + else + echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1 + echo "" >> "$LOG_FILE" 2>&1 + echo $GPGTEST >> "$LOG_FILE" 2>&1 + exit 1 + fi + done +} diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index f10f5fad9..2dc5b0504 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -27,8 +27,7 @@ function usage { cat << EOF Usage: $0 [pcap-file-2] [pcap-file-N] -Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and -made available for review in the Security Onion toolset. +Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and made available for review in the Security Onion toolset. EOF } diff --git a/salt/common/tools/sbin/so-index-list b/salt/common/tools/sbin/so-index-list index f349cb0d4..0352e7e3e 100755 --- a/salt/common/tools/sbin/so-index-list +++ b/salt/common/tools/sbin/so-index-list @@ -16,7 +16,7 @@ # along with this program. If not, see . {% if grains['role'] in ['so-node','so-heavynode'] %} -curl -X GET -k https://localhost:9200/_cat/indices?v +curl -X GET -k -L https://localhost:9200/_cat/indices?v {% else %} -curl -X GET localhost:9200/_cat/indices?v +curl -X GET -L localhost:9200/_cat/indices?v {% endif %} diff --git a/salt/common/tools/sbin/so-ip-update b/salt/common/tools/sbin/so-ip-update new file mode 100755 index 000000000..9976a42e8 --- /dev/null +++ b/salt/common/tools/sbin/so-ip-update @@ -0,0 +1,63 @@ +#!/bin/bash + +. 
$(dirname $0)/so-common + +if [ "$FORCE_IP_UPDATE" != "1" ]; then + is_single_node_grid || fail "Cannot update the IP on a distributed grid" +fi + +echo "This tool will update a manager's IP address to the new IP assigned to the management network interface." + +echo +echo "WARNING: This tool is still undergoing testing, use at your own risk!" +echo + +if [ -z "$OLD_IP" ]; then + OLD_IP=$(lookup_pillar "managerip") + + if [ -z "$OLD_IP" ]; then + fail "Unable to find old IP; possible salt system failure" + fi + + echo "Found old IP $OLD_IP." +fi + +if [ -z "$NEW_IP" ]; then + iface=$(lookup_pillar "mainint" "host") + NEW_IP=$(ip -4 addr list $iface | grep inet | cut -d' ' -f6 | cut -d/ -f1) + + if [ -z "$NEW_IP" ]; then + fail "Unable to detect new IP on interface $iface. " + fi + + echo "Detected new IP $NEW_IP on interface $iface." +fi + +if [ "$OLD_IP" == "$NEW_IP" ]; then + fail "IP address has not changed" +fi + +echo "About to change old IP $OLD_IP to new IP $NEW_IP." + +echo +read -n 1 -p "Would you like to continue? (y/N) " CONTINUE +echo + +if [ "$CONTINUE" == "y" ]; then + for file in $(grep -rlI $OLD_IP /opt/so/saltstack /etc); do + echo "Updating file: $file" + sed -i "s|$OLD_IP|$NEW_IP|g" $file + done + + echo "The IP has been changed from $OLD_IP to $NEW_IP." + + echo + read -n 1 -p "The system must reboot to ensure all services have restarted with the new configuration. Reboot now? (y/N)" CONTINUE + echo + + if [ "$CONTINUE" == "y" ]; then + reboot + fi +else + echo "Exiting without changes." 
+fi \ No newline at end of file diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export index 7f578a3ba..6dd82a10a 100755 --- a/salt/common/tools/sbin/so-kibana-config-export +++ b/salt/common/tools/sbin/so-kibana-config-export @@ -23,7 +23,7 @@ KIBANA_HOST={{ MANAGER }} KSO_PORT=5601 OUTFILE="saved_objects.ndjson" -curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE +curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE # Clean up using PLACEHOLDER sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE diff --git a/salt/common/tools/sbin/so-pcap-import b/salt/common/tools/sbin/so-pcap-import new file mode 100755 index 000000000..667bf064e --- /dev/null +++ b/salt/common/tools/sbin/so-pcap-import @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +$(dirname $0)/so-import-pcap $@ diff --git a/salt/common/tools/sbin/so-playbook-reset b/salt/common/tools/sbin/so-playbook-reset new file mode 100755 index 000000000..f07df2142 --- /dev/null +++ b/salt/common/tools/sbin/so-playbook-reset @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /usr/sbin/so-common + +salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create + +/usr/sbin/so-soctopus-restart + +echo "Importing Plays - this will take some time...." +wait 5 +/usr/sbin/so-playbook-ruleupdate \ No newline at end of file diff --git a/salt/common/tools/sbin/so-salt-minion-check b/salt/common/tools/sbin/so-salt-minion-check new file mode 100644 index 000000000..a8828b16e --- /dev/null +++ b/salt/common/tools/sbin/so-salt-minion-check @@ -0,0 +1,104 @@ +{% import_yaml 'salt/minion.defaults.yaml' as SALT_MINION_DEFAULTS -%} + +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# this script checks the time the file /opt/so/log/salt/state-apply-test was last modified and restarts the salt-minion service if it is outside a threshold date/time +# the file is modified via file.touch using a scheduled job healthcheck.salt-minion.state-apply-test that runs a state.apply. +# by default the file should be updated every 5-8 minutes. +# this allows us to test that the minion is able apply states and communicate with the master +# if the file is unable to be touched via the state.apply, then we assume there is a possibilty that the minion is hung (though it could be possible the master is down as well) +# we then stop the service, pkill salt-minion, the start the salt-minion service back up + +. /usr/sbin/so-common + +QUIET=false +UPTIME_REQ=1800 #in seconds, how long the box has to be up before considering restarting salt-minion due to /opt/so/log/salt/state-apply-test not being touched +CURRENT_TIME=$(date +%s) +SYSTEM_START_TIME=$(date -d "$(> "/opt/so/log/salt/so-salt-minion-check" +} + +log() { + msg=$1 + level=${2:-I} + now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ") + if ! $QUIET; then + echo $msg + fi + echo -e "$now | $level | $msg" >> "/opt/so/log/salt/so-salt-minion-check" 2>&1 +} + +error() { + log "$1" "E" +} + +info() { + log "$1" "I" +} + +usage() +{ +cat <> /etc/ssh/sshd_config +if ! [[ $quiet ]]; then + print_sshd_t "ciphers" "After" + echo "" +fi + +if [[ $before != $after ]]; then + reload_required=true +fi + +if ! 
[[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi +sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config +if ! [[ $quiet ]]; then + print_sshd_t "kexalgorithms" "After" + echo "" +fi + +if [[ $before != $after ]]; then + reload_required=true +fi + +if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi +sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config +if ! [[ $quiet ]]; then + print_sshd_t "macs" "After" + echo "" +fi + +if [[ $before != $after ]]; then + reload_required=true +fi + +if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi +sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config +if ! [[ $quiet ]]; then + print_sshd_t "hostkeyalgorithms" "After" + echo "" +fi + +if [[ $before != $after ]]; then + reload_required=true +fi + +if [[ $reload_required == true ]]; then + print_msg "Reloading sshd to load config changes..." + systemctl reload sshd +fi + +{% if grains['os'] != 'CentOS' %} +print_msg "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting." +{% endif %} + diff --git a/salt/common/tools/sbin/so-status b/salt/common/tools/sbin/so-status index 519d9f39d..8dd607bd6 100755 --- a/salt/common/tools/sbin/so-status +++ b/salt/common/tools/sbin/so-status @@ -14,8 +14,6 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{%- from 'common/maps/so-status.map.jinja' import docker with context %} -{%- set container_list = docker['containers'] | sort | unique %} if ! 
[ "$(id -u)" = 0 ]; then echo "This command must be run as root" @@ -23,19 +21,24 @@ if ! [ "$(id -u)" = 0 ]; then fi # Constants +SYSTEM_START_TIME=$(date -d "$( [email]" @@ -56,14 +56,14 @@ function verifyEnvironment() { require "openssl" require "sqlite3" [[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable" - response=$(curl -Ss ${kratosUrl}/) + response=$(curl -Ss -L ${kratosUrl}/) [[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable" } function findIdByEmail() { email=$1 - response=$(curl -Ss ${kratosUrl}/identities) + response=$(curl -Ss -L ${kratosUrl}/identities) identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id") echo $identityId } @@ -113,7 +113,7 @@ function updatePassword() { } function listUsers() { - response=$(curl -Ss ${kratosUrl}/identities) + response=$(curl -Ss -L ${kratosUrl}/identities) [[ $? != 0 ]] && fail "Unable to communicate with Kratos" echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort @@ -131,7 +131,7 @@ function createUser() { EOF ) - response=$(curl -Ss ${kratosUrl}/identities -d "$addUserJson") + response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson") [[ $? != 0 ]] && fail "Unable to communicate with Kratos" identityId=$(echo "${response}" | jq ".id") @@ -153,7 +153,7 @@ function updateStatus() { identityId=$(findIdByEmail "$email") [[ ${identityId} == "" ]] && fail "User not found" - response=$(curl -Ss "${kratosUrl}/identities/$identityId") + response=$(curl -Ss -L "${kratosUrl}/identities/$identityId") [[ $? 
!= 0 ]] && fail "Unable to communicate with Kratos" oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath") @@ -171,7 +171,7 @@ function updateStatus() { fi updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)") - response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson") + response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson") [[ $? != 0 ]] && fail "Unable to mark user as locked" } @@ -191,7 +191,7 @@ function deleteUser() { identityId=$(findIdByEmail "$email") [[ ${identityId} == "" ]] && fail "User not found" - response=$(curl -Ss -XDELETE "${kratosUrl}/identities/$identityId") + response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId") [[ $? != 0 ]] && fail "Unable to communicate with Kratos" } diff --git a/salt/common/tools/sbin/so-wazuh-user-add b/salt/common/tools/sbin/so-wazuh-user-add new file mode 100755 index 000000000..836e45959 --- /dev/null +++ b/salt/common/tools/sbin/so-wazuh-user-add @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1 diff --git a/salt/common/tools/sbin/so-wazuh-user-passwd b/salt/common/tools/sbin/so-wazuh-user-passwd new file mode 100755 index 000000000..836e45959 --- /dev/null +++ b/salt/common/tools/sbin/so-wazuh-user-passwd @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1 diff --git a/salt/common/tools/sbin/so-wazuh-user-remove b/salt/common/tools/sbin/so-wazuh-user-remove new file mode 100755 index 000000000..a70450f04 --- /dev/null +++ b/salt/common/tools/sbin/so-wazuh-user-remove @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd -D /var/ossec/api/configuration/auth/user $1 diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 07848a31c..27439a137 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -16,24 +16,22 @@ # along with this program. If not, see . . /usr/sbin/so-common + UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) DEFAULT_SALT_DIR=/opt/so/saltstack/default BATCHSIZE=5 SOUP_LOG=/root/soup.log + exec 3>&1 1>${SOUP_LOG} 2>&1 -manager_check() { - # Check to see if this is a manager - MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then - echo "This is a manager. We can proceed." - MINIONID=$(salt-call grains.get id --out=txt|awk -F: {'print $2'}|tr -d ' ') - else - echo "Please run soup on the manager. The manager controls all updates." - exit 0 - fi +add_common() { + cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + salt-call state.apply common queue=True + echo "Run soup one more time" + exit 0 } airgap_mounted() { @@ -79,6 +77,30 @@ airgap_mounted() { fi } +airgap_update_dockers() { + if [ $is_airgap -eq 0 ]; then + # Let's copy the tarball + if [ ! -f $AGDOCKER/registry.tar ]; then + echo "Unable to locate registry. 
Exiting" + exit 1 + else + echo "Stopping the registry docker" + docker stop so-dockerregistry + docker rm so-dockerregistry + echo "Copying the new dockers over" + tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker + echo "Add Registry back" + docker load -i $AGDOCKER/registry_image.tar + fi + fi +} + +update_registry() { + docker stop so-dockerregistry + docker rm so-dockerregistry + salt-call state.apply registry queue=True +} + check_airgap() { # See if this is an airgap install AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap | awk '{print $2}') @@ -92,6 +114,12 @@ check_airgap() { fi } +check_sudoers() { + if grep -q "so-setup" /etc/sudoers; then + echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"." + fi +} + clean_dockers() { # Place Holder for cleaning up old docker images echo "Trying to clean up old dockers." @@ -100,7 +128,6 @@ clean_dockers() { } clone_to_tmp() { - # TODO Need to add a air gap option # Clean old files rm -rf /tmp/sogh # Make a temp location for the files @@ -128,21 +155,9 @@ copy_new_files() { cd /tmp } -detect_os() { - # Detect Base OS - echo "Determining Base OS." >> "$SOUP_LOG" 2>&1 - if [ -f /etc/redhat-release ]; then - OS="centos" - elif [ -f /etc/os-release ]; then - OS="ubuntu" - fi - echo "Found OS: $OS" >> "$SOUP_LOG" 2>&1 -} - highstate() { - # Run a highstate but first cancel a running one. - salt-call saltutil.kill_all_jobs - salt-call state.highstate -l info + # Run a highstate. 
+ salt-call state.highstate -l info queue=True } masterlock() { @@ -182,7 +197,6 @@ pillar_changes() { [[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2 [[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3 [[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0 - } rc1_to_rc2() { @@ -283,113 +297,15 @@ unmount_update() { umount /tmp/soagupdate } + update_centos_repo() { # Update the files in the repo echo "Syncing new updates to /nsm/repo" - rsync -a $AGDOCKER/repo /nsm/repo + rsync -av $AGREPO/* /nsm/repo/ echo "Creating repo" createrepo /nsm/repo } -update_dockers() { - if [ $is_airgap -eq 0 ]; then - # Let's copy the tarball - if [ ! -f $AGDOCKER/registry.tar ]; then - echo "Unable to locate registry. Exiting" - exit 0 - else - echo "Stopping the registry docker" - docker stop so-dockerregistry - docker rm so-dockerregistry - echo "Copying the new dockers over" - tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker - fi - else - # List all the containers - if [ $MANAGERCHECK == 'so-import' ]; then - TRUSTED_CONTAINERS=( \ - "so-idstools" \ - "so-nginx" \ - "so-filebeat" \ - "so-suricata" \ - "so-soc" \ - "so-elasticsearch" \ - "so-kibana" \ - "so-kratos" \ - "so-suricata" \ - "so-registry" \ - "so-pcaptools" \ - "so-zeek" ) - elif [ $MANAGERCHECK != 'so-helix' ]; then - TRUSTED_CONTAINERS=( \ - "so-acng" \ - "so-thehive-cortex" \ - "so-curator" \ - "so-domainstats" \ - "so-elastalert" \ - "so-elasticsearch" \ - "so-filebeat" \ - "so-fleet" \ - "so-fleet-launcher" \ - "so-freqserver" \ - "so-grafana" \ - "so-idstools" \ - "so-influxdb" \ - "so-kibana" \ - "so-kratos" \ - "so-logstash" \ - "so-minio" \ - "so-mysql" \ - "so-nginx" \ - "so-pcaptools" \ - "so-playbook" \ - "so-redis" \ - "so-soc" \ - "so-soctopus" \ - "so-steno" \ - "so-strelka-frontend" \ - "so-strelka-manager" \ - "so-strelka-backend" \ - "so-strelka-filestream" \ - "so-suricata" \ - "so-telegraf" \ - "so-thehive" \ - "so-thehive-es" \ - "so-wazuh" \ - "so-zeek" ) - else - TRUSTED_CONTAINERS=( \ - 
"so-filebeat" \ - "so-idstools" \ - "so-logstash" \ - "so-nginx" \ - "so-redis" \ - "so-steno" \ - "so-suricata" \ - "so-telegraf" \ - "so-zeek" ) - fi - -# Download the containers from the interwebs - for i in "${TRUSTED_CONTAINERS[@]}" - do - # Pull down the trusted docker image - echo "Downloading $i:$NEWVERSION" - docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i:$NEWVERSION - # Tag it with the new registry destination - docker tag $IMAGEREPO/$i:$NEWVERSION $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION - docker push $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION - done - fi - # Cleanup on Aisle 4 - clean_dockers - echo "Add Registry back if airgap" - if [ $is_airgap -eq 0 ]; then - docker load -i $AGDOCKER/registry_image.tar - fi - -} - update_version() { # Update the version to the latest echo "Updating the Security Onion version file." @@ -411,6 +327,10 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." else + UPGRADESALT=1 + fi +} +upgrade_salt() { SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" @@ -421,7 +341,11 @@ upgrade_check_salt() { yum versionlock delete "salt-*" echo "Updating Salt packages and restarting services." echo "" - sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + if [ $is_airgap -eq 0 ]; then + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION" + else + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + fi echo "Applying yum versionlock for Salt." echo "" yum versionlock add "salt-*" @@ -441,7 +365,6 @@ upgrade_check_salt() { apt-mark hold "salt-master" apt-mark hold "salt-minion" fi - fi } verify_latest_update_script() { @@ -478,13 +401,14 @@ done echo "Checking to see if this is a manager." 
echo "" -manager_check +require_manager +set_minionid echo "Checking to see if this is an airgap install" echo "" check_airgap echo "Found that Security Onion $INSTALLEDVERSION is currently installed." echo "" -detect_os +set_os echo "" if [ $is_airgap -eq 0 ]; then # Let's mount the ISO since this is airgap @@ -493,6 +417,12 @@ else echo "Cloning Security Onion github repo into $UPDATE_DIR." clone_to_tmp fi +if [ -f /usr/sbin/so-image-common ]; then + . /usr/sbin/so-image-common +else +add_common +fi + echo "" echo "Verifying we have the latest soup script." verify_latest_update_script @@ -502,29 +432,60 @@ echo "Let's see if we need to update Security Onion." upgrade_check space_check +echo "Checking for Salt Master and Minion updates." +upgrade_check_salt + echo "" echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." echo "" +echo "Updating dockers to $NEWVERSION." +if [ $is_airgap -eq 0 ]; then + airgap_update_dockers +else + update_registry + update_docker_containers "soup" +fi +echo "" echo "Stopping Salt Minion service." systemctl stop salt-minion +echo "Killing any remaining Salt Minion processes." +pkill -9 -ef /usr/bin/salt-minion echo "" echo "Stopping Salt Master service." systemctl stop salt-master echo "" -echo "Checking for Salt Master and Minion updates." -upgrade_check_salt +# Does salt need upgraded. If so update it. +if [ "$UPGRADESALT" == "1" ]; then + echo "Upgrading Salt" + # Update the repo files so it can actually upgrade + if [ $is_airgap -eq 0 ]; then + update_centos_repo + yum clean all + fi + upgrade_salt +fi + +echo "Checking if Salt was upgraded." +echo "" +# Check that Salt was upgraded +if [[ $(salt --versions-report | grep Salt: | awk {'print $2'}) != "$NEWSALTVERSION" ]]; then + echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG." + echo "Once the issue is resolved, run soup again." + echo "Exiting." + echo "" + exit 1 +else + echo "Salt upgrade success." 
+ echo "" +fi echo "Making pillar changes." pillar_changes echo "" -echo "" -echo "Updating dockers to $NEWVERSION." -update_dockers - # Only update the repo if its airgap -if [ $is_airgap -eq 0 ]; then +if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then update_centos_repo fi @@ -542,9 +503,19 @@ echo "" echo "Starting Salt Master service." systemctl start salt-master +# Only regenerate osquery packages if Fleet is enabled +FLEET_MANAGER=$(lookup_pillar fleet_manager) +FLEET_NODE=$(lookup_pillar fleet_node) +if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then + echo "" + echo "Regenerating Osquery Packages.... This will take several minutes." + salt-call state.apply fleet.event_gen-packages -l info queue=True + echo "" +fi + echo "" echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." -highstate +salt-call state.highstate -l info queue=True echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." @@ -557,18 +528,23 @@ masterunlock echo "" echo "Starting Salt Master service." systemctl start salt-master -highstate +echo "Running a highstate. This could take several minutes." +salt-call state.highstate -l info queue=True playbook unmount_update -SALTUPGRADED="True" -if [[ "$SALTUPGRADED" == "True" ]]; then +if [ "$UPGRADESALT" == "1" ]; then echo "" echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION." 
- salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion + if [ $is_airgap -eq 0 ]; then + salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all" + fi + salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True echo "" fi +check_sudoers + } main "$@" | tee /dev/fd/3 diff --git a/salt/curator/files/bin/so-curator-close b/salt/curator/files/bin/so-curator-close index 11324dd31..682653ce4 100644 --- a/salt/curator/files/bin/so-curator-close +++ b/salt/curator/files/bin/so-curator-close @@ -1,2 +1,27 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=close +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + /usr/sbin/so-curator-closed-delete > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1 diff --git a/salt/curator/files/bin/so-curator-closed-delete b/salt/curator/files/bin/so-curator-closed-delete index 8f6d0a8ea..714aa5f6f 100755 --- a/salt/curator/files/bin/so-curator-closed-delete +++ b/salt/curator/files/bin/so-curator-closed-delete @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -34,6 +34,13 @@ #fi # Avoid starting multiple instances -if ! 
pgrep -f "so-curator-closed-delete-delete" >/dev/null; then - /usr/sbin/so-curator-closed-delete-delete -fi +APP=closeddelete +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists, exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + +/usr/sbin/so-curator-closed-delete-delete diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete index 8909512db..c892bf23f 100755 --- a/salt/curator/files/bin/so-curator-closed-delete-delete +++ b/salt/curator/files/bin/so-curator-closed-delete-delete @@ -26,41 +26,36 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -#. /usr/sbin/so-elastic-common -#. /etc/nsm/securityonion.conf - LOG="/opt/so/log/curator/so-curator-closed-delete.log" +overlimit() { + + [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] +} + +closedindices() { + + INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null) + [ $? -eq 1 ] && return false + echo ${INDICES} | grep -q -E "(logstash-|so-)" +} + # Check for 2 conditions: # 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT? -# 2. Are there any closed logstash- or so- indices that we can delete? +# 2. Are there any closed indices that we can delete? # If both conditions are true, keep on looping until one of the conditions is false.
-while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] && -{% if grains['role'] in ['so-node','so-heavynode'] %} -curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do -{% else %} -curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do -{% endif %} +while overlimit && closedindices; do - # We need to determine OLDEST_INDEX. - # First, get the list of closed indices that are prefixed with "logstash-" or "so-". - # For example: logstash-ids-YYYY.MM.DD + # We need to determine OLDEST_INDEX: + # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed. # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field. # Finally, select the first entry in that sorted list. - {% if grains['role'] in ['so-node','so-heavynode'] %} - OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1) - {% else %} - OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1) - {% endif %} + OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1) # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it. 
- {% if grains['role'] in ['so-node','so-heavynode'] %} - curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX} - {% else %} - curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX} - {% endif %} + curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX} # Finally, write a log entry that says we deleted it. echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG} -done +done \ No newline at end of file diff --git a/salt/curator/files/bin/so-curator-delete b/salt/curator/files/bin/so-curator-delete index 166497855..6a85eddb4 100644 --- a/salt/curator/files/bin/so-curator-delete +++ b/salt/curator/files/bin/so-curator-delete @@ -1,2 +1,27 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=delete +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/delete.yml > /dev/null 2>&1 diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 31f738349..2f0147794 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -127,6 +127,12 @@ so-curator: - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro - /opt/so/conf/curator/action/:/etc/curator/action:ro - /opt/so/log/curator:/var/log/curator:rw + +append_so-curator_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-curator + # Begin Curator Cron Jobs # Close diff --git a/salt/docker_clean/init.sls b/salt/docker_clean/init.sls index 795b96e3a..61499cdb5 100644 --- a/salt/docker_clean/init.sls +++ b/salt/docker_clean/init.sls @@ -1,6 +1,6 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0']%} +{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1']%} {% for VERSION in OLDVERSIONS %} remove_images_{{ VERSION }}: diff --git a/salt/domainstats/init.sls b/salt/domainstats/init.sls index daac87387..965d87426 100644 --- a/salt/domainstats/init.sls +++ b/salt/domainstats/init.sls @@ -43,19 +43,24 @@ dstatslogdir: so-domainstatsimage: cmd.run: - - name: docker pull --disable-content-trust=false docker.io/{{ IMAGEREPO }}/so-domainstats:HH1.0.3 + - name: docker pull {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }} so-domainstats: docker_container.running: - require: - so-domainstatsimage - - image: docker.io/{{ IMAGEREPO }}/so-domainstats:HH1.0.3 + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }} - hostname: domainstats - name: so-domainstats - user: domainstats - binds: - 
/opt/so/log/domainstats:/var/log/domain_stats +append_so-domainstats_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-domainstats + {% else %} domainstats_state_not_allowed: diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py index 31a58b44b..cf29c0669 100644 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ b/salt/elastalert/files/modules/so/playbook-es.py @@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter): today = strftime("%Y.%m.%d", gmtime()) timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) headers = {"Content-Type": "application/json"} - payload = {"rule": { "name": self.rule['play_title'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp} + payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp} url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/" requests.post(url, data=json.dumps(payload), headers=headers, verify=False) diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls index 2e757805c..7caef532f 100644 --- a/salt/elastalert/init.sls +++ 
b/salt/elastalert/init.sls @@ -121,6 +121,12 @@ so-elastalert: - {{MANAGER_URL}}:{{MANAGER_IP}} - require: - module: wait_for_elasticsearch + +append_so-elastalert_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-elastalert + {% endif %} {% else %} diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result index 3a6ed15a3..67a0b39f8 100644 --- a/salt/elasticsearch/files/ingest/osquery.query_result +++ b/salt/elasticsearch/files/ingest/osquery.query_result @@ -6,7 +6,7 @@ { "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } }, { "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } }, { "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } }, - { "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } }, + { "rename": { "field": "temp.EventData", "target_field": "winlog.event_data", "ignore_missing": true } }, { "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } }, { "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } }, { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, @@ -22,4 +22,4 @@ { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} }, { "pipeline": { "name": "common" } } ] -} \ No newline at end of file +} diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index 06e2d5cb0..e5e8560f8 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -6,15 +6,27 @@ { "rename": { "field": "message2.scan", "target_field": "scan", "ignore_missing": true } }, { 
"rename": { "field": "message2.request", "target_field": "request", "ignore_missing": true } }, { "rename": { "field": "scan.hash", "target_field": "hash", "ignore_missing": true } }, - + { "rename": { "field": "scan.exiftool", "target_field": "exiftool", "ignore_missing": true } }, { "grok": { "if": "ctx.request?.attributes?.filename != null", "field": "request.attributes.filename", "patterns": ["-%{WORD:log.id.fuid}-"], "ignore_failure": true } }, { "foreach": { - "if": "ctx.scan?.exiftool?.keys !=null", - "field": "scan.exiftool.keys", - "processor":{ + "if": "ctx.exiftool?.keys !=null", + "field": "exiftool.keys", + "processor": { + "append": { + "field": "scan.exiftool", + "value": "{{_ingest._value.key}}={{_ingest._value.value}}" + } + } + } + }, + { "foreach": + { + "if": "ctx.exiftool?.keys !=null", + "field": "exiftool.keys", + "processor": { "set": { - "field": "scan.exiftool.{{_ingest._value.key}}", + "field": "exiftool.{{_ingest._value.key}}", "value": "{{_ingest._value.value}}" } } @@ -32,6 +44,14 @@ } } }, + { "set": { "if": "ctx.exiftool?.SourceFile != null", "field": "file.source", "value": "{{exiftool.SourceFile}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FilePermissions != null", "field": "file.permissions", "value": "{{exiftool.FilePermissions}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FileName != null", "field": "file.name", "value": "{{exiftool.FileName}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FileModifyDate != null", "field": "file.mtime", "value": "{{exiftool.FileModifyDate}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FileAccessDate != null", "field": "file.accessed", "value": "{{exiftool.FileAccessDate}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FileInodeChangeDate != null", "field": "file.ctime", "value": "{{exiftool.FileInodeChangeDate}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.FileDirectory != null", "field": 
"file.directory", "value": "{{exiftool.FileDirectory}}", "ignore_failure": true }}, + { "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }}, { "set": { "if": "ctx.scan?.yara?.matches != null", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }}, { "set": { "if": "ctx.scan?.yara?.matches != null", "field": "dataset", "value": "alert", "override": true }}, { "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }}, @@ -42,7 +62,8 @@ { "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 70 && ctx.rule?.score <=89", "field": "event.severity", "value": 3, "override": true } }, { "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 90", "field": "event.severity", "value": 4, "override": true } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" }}, - { "remove": { "field": ["host", "path", "message", "scan.exiftool.keys", "scan.yara.meta"], "ignore_missing": true } }, + { "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }}, + { "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } }, { "pipeline": { "name": "common" } } ] } diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog index b4e09e9df..b08a62187 100644 --- a/salt/elasticsearch/files/ingest/syslog +++ b/salt/elasticsearch/files/ingest/syslog @@ -12,9 +12,25 @@ "ignore_failure": true } }, - { "grok": { "field": "message", "patterns": ["<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}"], "ignore_failure": false } }, - { "set": { "if": "ctx.source.application == 'filterlog'", "field": "dataset", "value": "firewall" } }, - { "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog" } }, + { + "grok": + { + "field": "message", + "patterns": [ + 
"^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}$", + "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$" + ], + "ignore_failure": true + } + }, + { "set": { "if": "ctx.source?.application == 'filterlog'", "field": "dataset", "value": "firewall", "ignore_failure": true } }, + { "set": { "if": "ctx.vendor != null", "field": "module", "value": "{{ vendor }}", "ignore_failure": true } }, + { "set": { "if": "ctx.product != null", "field": "dataset", "value": "{{ product }}", "ignore_failure": true } }, + { "set": { "field": "ingest.timestamp", "value": "{{ @timestamp }}" } }, + { "date": { "if": "ctx.syslog?.timestamp != null", "field": "syslog.timestamp", "target_field": "@timestamp", "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], "ignore_failure": true } }, + { "remove": { "field": ["pid", "program"], "ignore_missing": true, "ignore_failure": true } }, + { "pipeline": { "if": "ctx.vendor != null && ctx.product != null", "name": "{{ vendor }}.{{ product }}", "ignore_failure": true } }, + { "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog", "ignore_failure": true } }, { "pipeline": { "name": "common" } } ] } diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon index 2ca5c6193..599899488 100644 --- a/salt/elasticsearch/files/ingest/sysmon +++ b/salt/elasticsearch/files/ingest/sysmon @@ -30,40 +30,40 @@ { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.image", 
"target_field": "process.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.image", "target_field": "process.executable", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.processID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.processGuid", "target_field": "process.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.processID", "target_field": "process.pid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ProcessId", "target_field": "process.pid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.processGuid", "target_field": "process.entity_id", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.commandLine", "target_field": "process.command_line", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.commandLine", "target_field": "process.command_line", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.currentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.currentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.description", "target_field": 
"process.pe.description", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.description", "target_field": "process.pe.description", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.product", "target_field": "process.pe.product", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.product", "target_field": "process.pe.product", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.company", "target_field": "process.pe.company", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.company", "target_field": "process.pe.company", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Company", "target_field": "process.pe.company", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.originalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.originalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.fileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.fileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.parentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, + { "rename": { "field": 
"winlog.event_data.parentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.parentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.parentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.parentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.parentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.parentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.parentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } }, { 
"rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } } + { "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } } ] } diff --git a/salt/elasticsearch/files/so-elasticsearch-pipelines b/salt/elasticsearch/files/so-elasticsearch-pipelines index eed62da24..dce6a081b 100755 --- a/salt/elasticsearch/files/so-elasticsearch-pipelines +++ b/salt/elasticsearch/files/so-elasticsearch-pipelines @@ -28,9 +28,9 @@ COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do {% if grains['role'] in ['so-node','so-heavynode'] %} - curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" {% else %} - curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" {% endif %} if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" @@ -52,9 +52,9 @@ cd ${ELASTICSEARCH_INGEST_PIPELINES} echo "Loading pipelines..." 
{% if grains['role'] in ['so-node','so-heavynode'] %} -for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done +for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done {% else %} -for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done +for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done {% endif %} echo diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 1406df02c..0b28ee6d1 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -215,13 +215,17 @@ so-elasticsearch: - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro - /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro - - watch: - file: cacertz - file: esyml - file: esingestconf - file: so-elasticsearch-pipelines-file +append_so-elasticsearch_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-elasticsearch + so-elasticsearch-pipelines-file: file.managed: - name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines diff --git 
a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json index 7db65f62c..74ff3748a 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json +++ b/salt/elasticsearch/templates/so/so-common-template.json @@ -379,9 +379,14 @@ } } }, - "scan":{ + "scan":{ "type":"object", - "dynamic": true + "dynamic": true, + "properties":{ + "exiftool":{ + "type":"text" + } + } }, "server":{ "type":"object", diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index 3587b6ffd..799a37337 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -115,7 +115,7 @@ filebeat.inputs: fields: ["source", "prospector", "input", "offset", "beat"] fields_under_root: true - clean_removed: false + clean_removed: true close_removed: false - type: log diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index b770f7cc8..98229ca35 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -58,8 +58,8 @@ filebeatconfsync: file.managed: - name: /opt/so/conf/filebeat/etc/filebeat.yml - source: salt://filebeat/etc/filebeat.yml - - user: 0 - - group: 0 + - user: root + - group: root - template: jinja - defaults: INPUTS: {{ salt['pillar.get']('filebeat:config:inputs', {}) }} @@ -86,6 +86,11 @@ so-filebeat: - watch: - file: /opt/so/conf/filebeat/etc/filebeat.yml +append_so-filebeat_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-filebeat + {% else %} filebeat_state_not_allowed: diff --git a/salt/firewall/hostgroups.yaml b/salt/firewall/hostgroups.yaml index 5ff6b900b..778912911 100644 --- a/salt/firewall/hostgroups.yaml +++ b/salt/firewall/hostgroups.yaml @@ -1,3 +1,4 @@ +{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %} firewall: hostgroups: anywhere: @@ -9,7 +10,7 @@ firewall: ips: delete: insert: - - 172.17.0.0/24 + - {{ DNET }}/24 localhost: ips: delete: diff --git a/salt/fleet/event_enable-fleet.sls 
b/salt/fleet/event_enable-fleet.sls index d09749a55..34b031685 100644 --- a/salt/fleet/event_enable-fleet.sls +++ b/salt/fleet/event_enable-fleet.sls @@ -1,4 +1,10 @@ -{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret default') %} +{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %} +{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %} +{% if FLEETNODE or FLEETMANAGER %} + {% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret default') %} +{% else %} + {% set ENROLLSECRET = '' %} +{% endif %} {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls index 220f3c4cb..db3414a18 100644 --- a/salt/fleet/init.sls +++ b/salt/fleet/init.sls @@ -12,6 +12,8 @@ {% else %} {% set MAINIP = salt['pillar.get']('global:managerip') %} {% endif %} +{% set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %} + include: - mysql @@ -71,7 +73,7 @@ fleetdb: fleetdbuser: mysql_user.present: - - host: 172.17.0.0/255.255.0.0 + - host: {{ DNET }}/255.255.0.0 - password: {{ FLEETPASS }} - connection_host: {{ MAINIP }} - connection_port: 3306 @@ -85,7 +87,7 @@ fleetdbpriv: - grant: all privileges - database: fleet.* - user: fleetdbuser - - host: 172.17.0.0/255.255.0.0 + - host: {{ DNET }}/255.255.0.0 - connection_host: {{ MAINIP }} - connection_port: 3306 - connection_user: root @@ -132,4 +134,9 @@ so-fleet: - watch: - /opt/so/conf/fleet/etc +append_so-fleet_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-fleet + {% endif %} \ No newline at end of file diff --git a/salt/freqserver/init.sls b/salt/freqserver/init.sls index 668e33079..f514353a1 100644 --- a/salt/freqserver/init.sls +++ b/salt/freqserver/init.sls @@ -43,19 +43,24 @@ freqlogdir: so-freqimage: cmd.run: - - name: docker pull 
--disable-content-trust=false docker.io/{{ IMAGEREPO }}/so-freqserver:HH1.0.3 + - name: docker pull {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }} so-freq: docker_container.running: - require: - so-freqimage - - image: docker.io/{{ IMAGEREPO }}/so-freqserver:HH1.0.3 + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }} - hostname: freqserver - name: so-freqserver - user: freqserver - binds: - /opt/so/log/freq_server:/var/log/freq_server:rw +append_so-freq_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-freq + {% else %} freqserver_state_not_allowed: diff --git a/salt/grafana/dashboards/eval/eval.json b/salt/grafana/dashboards/eval/eval.json index 241db393e..c9f3bced4 100644 --- a/salt/grafana/dashboards/eval/eval.json +++ b/salt/grafana/dashboards/eval/eval.json @@ -3565,7 +3565,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -3636,7 +3636,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3656,7 +3656,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4036,7 +4036,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -4084,7 +4084,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -4143,7 +4143,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - 
"net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -4214,7 +4214,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -4234,7 +4234,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4278,7 +4278,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -4298,7 +4298,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ diff --git a/salt/grafana/dashboards/manager/manager.json b/salt/grafana/dashboards/manager/manager.json index ede457cdb..c5c09ae0e 100644 --- a/salt/grafana/dashboards/manager/manager.json +++ b/salt/grafana/dashboards/manager/manager.json @@ -1795,7 +1795,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -1860,7 +1860,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE 
\"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1880,7 +1880,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -1924,7 +1924,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -1944,7 +1944,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2459,7 +2459,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2524,7 +2524,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2544,7 +2544,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2588,7 +2588,7 @@ 
"measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -2608,7 +2608,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3168,7 +3168,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -3233,7 +3233,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3253,7 +3253,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3297,7 +3297,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": 
"B", "resultFormat": "time_series", @@ -3317,7 +3317,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3463,7 +3463,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -3510,7 +3510,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -3700,7 +3700,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -3765,7 +3765,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3785,7 +3785,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3829,7 +3829,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -3849,7 +3849,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ diff --git a/salt/grafana/dashboards/managersearch/managersearch.json 
b/salt/grafana/dashboards/managersearch/managersearch.json index 657239b88..838a37426 100644 --- a/salt/grafana/dashboards/managersearch/managersearch.json +++ b/salt/grafana/dashboards/managersearch/managersearch.json @@ -1799,7 +1799,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -1864,7 +1864,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1884,7 +1884,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -1928,7 +1928,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -1948,7 +1948,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2546,7 +2546,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2611,7 +2611,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - 
"query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2631,7 +2631,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2675,7 +2675,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -2695,7 +2695,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3299,7 +3299,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT derivative(mean(\"rx_bytes\"), 1s) *8 FROM \"docker_container_net\" WHERE (\"host\" = '{{ SERVERNAME }}' AND \"container_name\" = 'so-influxdb') AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT non_negative_derivative(mean(\"rx_bytes\"), 1s) *8 FROM \"docker_container_net\" WHERE (\"host\" = '{{ SERVERNAME }}' AND \"container_name\" = 'so-influxdb') AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3319,7 +3319,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3380,7 +3380,7 @@ "params": [ "1s" ], - 
"type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3785,7 +3785,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3846,7 +3846,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4164,7 +4164,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -4211,7 +4211,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], diff --git a/salt/grafana/dashboards/search_nodes/searchnode.json b/salt/grafana/dashboards/search_nodes/searchnode.json index 8677d9f27..a7170d276 100644 --- a/salt/grafana/dashboards/search_nodes/searchnode.json +++ b/salt/grafana/dashboards/search_nodes/searchnode.json @@ -2135,7 +2135,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -2182,7 +2182,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -2781,7 +2781,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2846,7 +2846,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2866,7 +2866,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2910,7 +2910,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM 
\"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -2930,7 +2930,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3353,7 +3353,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -3418,7 +3418,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3438,7 +3438,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3482,7 +3482,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -3502,7 +3502,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { 
"params": [ diff --git a/salt/grafana/dashboards/sensor_nodes/sensor.json b/salt/grafana/dashboards/sensor_nodes/sensor.json index 83b4bd921..048bb5a34 100644 --- a/salt/grafana/dashboards/sensor_nodes/sensor.json +++ b/salt/grafana/dashboards/sensor_nodes/sensor.json @@ -2729,7 +2729,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2800,7 +2800,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2820,7 +2820,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2864,7 +2864,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -2884,7 +2884,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3311,7 +3311,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -3359,7 +3359,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -3418,7 +3418,7 @@ 
"aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -3489,7 +3489,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -3509,7 +3509,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4085,7 +4085,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -4156,7 +4156,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -4176,7 +4176,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4220,7 +4220,7 @@ "measurement": "docker_container_net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * 
non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -4240,7 +4240,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ diff --git a/salt/grafana/dashboards/standalone/standalone.json b/salt/grafana/dashboards/standalone/standalone.json index d5ddb4ca3..3bab1ff5f 100644 --- a/salt/grafana/dashboards/standalone/standalone.json +++ b/salt/grafana/dashboards/standalone/standalone.json @@ -2010,7 +2010,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2081,7 +2081,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2101,7 +2101,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2145,7 +2145,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "B", "resultFormat": "time_series", @@ -2165,7 +2165,7 
@@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -2794,7 +2794,7 @@ "aliasColors": { "InBound": "#629E51", "OutBound": "#5195CE", - "net.derivative": "#1F78C1" + "net.non_negative_derivative": "#1F78C1" }, "bars": false, "dashLength": 10, @@ -2865,7 +2865,7 @@ "measurement": "net", "orderByTime": "ASC", "policy": "default", - "query": "SELECT 8 * derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT 8 * non_negative_derivative(mean(\"bytes_recv\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2885,7 +2885,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3466,7 +3466,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -3527,7 +3527,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4102,7 +4102,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4163,7 +4163,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4854,7 +4854,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -4915,7 +4915,7 @@ "params": [ "1s" ], - "type": "derivative" + "type": "non_negative_derivative" }, { "params": [ @@ -5202,7 +5202,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], @@ -5250,7 +5250,7 @@ }, { "params": [], - "type": "difference" + "type": "non_negative_difference" } ] ], diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index 39c2cc26c..8fe88f354 100644 --- a/salt/grafana/init.sls +++ 
b/salt/grafana/init.sls @@ -236,6 +236,11 @@ so-grafana: - watch: - file: /opt/so/conf/grafana/* +append_so-grafana_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-grafana + {% endif %} {% else %} diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls index 439c778aa..2aacb973d 100644 --- a/salt/idstools/init.sls +++ b/salt/idstools/init.sls @@ -58,11 +58,12 @@ rulesdir: - makedirs: True synclocalnidsrules: - file.managed: - - name: /opt/so/rules/nids/local.rules - - source: salt://idstools/local.rules + file.recurse: + - name: /opt/so/rules/nids/ + - source: salt://idstools/ - user: 939 - group: 939 + - include_pat: 'E@.rules' so-idstools: docker_container.running: @@ -75,10 +76,15 @@ so-idstools: - watch: - file: idstoolsetcsync +append_so-idstools_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-idstools + {% else %} idstools_state_not_allowed: test.fail_without_changes: - name: idstools_state_not_allowed -{% endif%} \ No newline at end of file +{% endif%} diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls index 669c9e9eb..9dc7ee692 100644 --- a/salt/influxdb/init.sls +++ b/salt/influxdb/init.sls @@ -54,6 +54,11 @@ so-influxdb: - watch: - file: influxdbconf +append_so-influxdb_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-influxdb + {% endif %} {% else %} diff --git a/salt/kibana/bin/keepkibanahappy.sh b/salt/kibana/bin/keepkibanahappy.sh index e8534ec12..541a666bd 100644 --- a/salt/kibana/bin/keepkibanahappy.sh +++ b/salt/kibana/bin/keepkibanahappy.sh @@ -4,7 +4,7 @@ echo -n "Waiting for ElasticSearch..." COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 30 ]]; do - curl --output /dev/null --silent --head --fail http://{{ ES }}:9200 + curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200 if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" echo "connected!" 
@@ -28,7 +28,7 @@ MAX_WAIT=240 # Check to see if Kibana is available wait_step=0 - until curl -s -XGET http://{{ ES }}:5601 > /dev/null ; do + until curl -s -XGET -L http://{{ ES }}:5601 > /dev/null ; do wait_step=$(( ${wait_step} + 1 )) echo "Waiting on Kibana...Attempt #$wait_step" if [ ${wait_step} -gt ${MAX_WAIT} ]; then @@ -42,12 +42,12 @@ wait_step=0 # Apply Kibana template echo echo "Applying Kibana template..." - curl -s -XPUT http://{{ ES }}:9200/_template/kibana \ + curl -s -XPUT -L http://{{ ES }}:9200/_template/kibana \ -H 'Content-Type: application/json' \ -d'{"index_patterns" : ".kibana", "settings": { "number_of_shards" : 1, "number_of_replicas" : 0 }, "mappings" : { "search": {"properties": {"hits": {"type": "integer"}, "version": {"type": "integer"}}}}}' echo - curl -s -XPUT "{{ ES }}:9200/.kibana/_settings" \ + curl -s -XPUT -L "{{ ES }}:9200/.kibana/_settings" \ -H 'Content-Type: application/json' \ -d'{"index" : {"number_of_replicas" : 0}}' echo diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls index 7f91719d4..02e76495d 100644 --- a/salt/kibana/init.sls +++ b/salt/kibana/init.sls @@ -90,6 +90,11 @@ so-kibana: - port_bindings: - 0.0.0.0:5601:5601 +append_so-kibana_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kibana + kibanadashtemplate: file.managed: - name: /opt/so/conf/kibana/saved_objects.ndjson.template diff --git a/salt/logstash/etc/certs/Put.Your.Certs.Here.txt b/salt/logstash/etc/certs/Put.Your.Certs.Here.txt new file mode 100644 index 000000000..e69de29bb diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index ad11bf567..e23e4eef2 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -173,6 +173,7 @@ so-logstash: - /sys/fs/cgroup:/sys/fs/cgroup:ro - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro + - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro {% if grains['role'] == 
'so-heavynode' %} - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% else %} @@ -201,6 +202,11 @@ so-logstash: - file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }} {% endfor %} +append_so-logstash_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-logstash + {% else %} logstash_state_not_allowed: diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 66e614b62..b506d06bf 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -81,6 +81,11 @@ so-aptcacherng: - /opt/so/log/aptcacher-ng:/var/log/apt-cacher-ng:rw - /opt/so/conf/aptcacher-ng/etc/acng.conf:/etc/apt-cacher-ng/acng.conf:ro +append_so-aptcacherng_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-aptcacherng + {% endif %} strelka_yara_update: diff --git a/salt/minio/init.sls b/salt/minio/init.sls index c1a681747..484eac1f9 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -62,6 +62,11 @@ so-minio: - /etc/pki/minio.crt:/.minio/certs/public.crt:ro - entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data" +append_so-minio_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-minio + {% else %} minio_state_not_allowed: diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index 818b5c303..5fb187ab8 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -94,9 +94,20 @@ so-mysql: - /opt/so/conf/mysql/etc cmd.run: - name: until nc -z {{ MAINIP }} 3306; do sleep 1; done - - timeout: 900 + - timeout: 600 - onchanges: - docker_container: so-mysql + module.run: + - so.mysql_conn: + - retry: 300 + - onchanges: + - cmd: so-mysql + +append_so-mysql_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-mysql + {% endif %} {% else %} diff --git a/salt/nginx/etc/nginx.conf.so-managersearch b/salt/nginx/etc/nginx.conf similarity index 69% rename from 
salt/nginx/etc/nginx.conf.so-managersearch rename to salt/nginx/etc/nginx.conf index f3dd219b7..e65979f92 100644 --- a/salt/nginx/etc/nginx.conf.so-managersearch +++ b/salt/nginx/etc/nginx.conf @@ -1,22 +1,26 @@ -{%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} -{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} +{%- set role = grains.id.split('_') | last %} +{%- if role == 'fleet' %} + {% set mainint = salt['pillar.get']('host:mainint') %} + {% set main_ip = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %} +{%- endif %} + +{%- set manager_ip = salt['pillar.get']('manager:mainip', '') %} +{%- set url_base = salt['pillar.get']('global:url_base') %} + +{%- set fleet_manager = salt['pillar.get']('global:fleet_manager') %} +{%- set fleet_node = salt['pillar.get']('global:fleet_node') %} +{%- set fleet_ip = salt['pillar.get']('global:fleet_ip', None) %} +{%- set airgap = salt['pillar.get']('global:airgap', 'False') %} -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ worker_processes auto; error_log /var/log/nginx/error.log; pid /run/nginx.pid; -# Load dynamic modules. See /usr/share/nginx/README.dynamic. include /usr/share/nginx/modules/*.conf; events { - worker_connections 1024; + worker_connections 1024; } http { @@ -33,62 +37,19 @@ http { types_hash_max_size 2048; client_max_body_size 2500M; + server_tokens off; + include /etc/nginx/mime.types; default_type application/octet-stream; - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. 
include /etc/nginx/conf.d/*.conf; - #server { - # listen 80 default_server; - # listen [::]:80 default_server; - # server_name _; - # root /opt/socore/html; - # index index.html; + {%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'fleet', 'import'] %} - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - # location / { - # } - - # error_page 404 /404.html; - # location = /40x.html { - # } - - # error_page 500 502 503 504 /50x.html; - # location = /50x.html { - # } - #} - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - {%- if ISAIRGAP is sameas true %} - server { - listen 7788; - server_name _; - root /opt/socore/html/repo; - location /rules/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - } - {%- endif %} - - -{% if FLEET_MANAGER %} + {%- if (fleet_manager or role == 'fleet') and role != 'import' %} server { listen 8090 ssl http2 default_server; - server_name _; + server_name {{ url_base }}; root /opt/socore/html; index blank.html; @@ -100,20 +61,44 @@ http { ssl_prefer_server_ciphers on; location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ managerip }}:8080; + {%- if role == 'fleet' %} + grpc_pass grpcs://{{ main_ip }}:8080; + {%- else %} + grpc_pass grpcs://{{ manager_ip }}:8080; + {%- endif %} grpc_set_header Host $host; grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_buffering off; } } -{% endif %} - -# Settings for a TLS enabled server. 
+ {%- endif %} server { - listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - server_name _; + listen 80 default_server; + server_name _; + return 307 https://{{ url_base }}$request_uri; + } + + server { + listen 443 ssl http2 default_server; + server_name _; + return 307 https://{{ url_base }}$request_uri; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key "/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + ssl_protocols TLSv1.2; + } + + {%- endif %} + + {%- if role == 'fleet' %} + server { + listen 443 ssl http2; + server_name {{ url_base }}; root /opt/socore/html; index index.html; @@ -123,12 +108,57 @@ http { ssl_session_timeout 10m; ssl_ciphers HIGH:!aNULL:!MD5; ssl_prefer_server_ciphers on; + ssl_protocols TLSv1.2; - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; + location /fleet/ { + proxy_pass https://{{ main_ip }}:8080; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + error_page 500 502 503 504 /50x.html; + location = /usr/share/nginx/html/50x.html { + } + } + {%- elif role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %} + + {%- if airgap is sameas true %} + server { + listen 7788; + server_name {{ url_base }}; + root /opt/socore/html/repo; + location /rules/ { + allow all; + sendfile on; + sendfile_max_chunk 1m; + autoindex on; + autoindex_exact_size off; + autoindex_format html; + autoindex_localtime on; + } + } + {%- endif %} + + server { + listen 443 ssl http2; + server_name {{ url_base }}; + root /opt/socore/html; + index index.html; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key 
"/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + ssl_protocols TLSv1.2; location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { - proxy_pass http://{{ managerip }}:9822; + proxy_pass http://{{ manager_ip }}:9822; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -142,7 +172,7 @@ http { location / { auth_request /auth/sessions/whoami; - proxy_pass http://{{ managerip }}:9822/; + proxy_pass http://{{ manager_ip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -156,7 +186,7 @@ http { location ~ ^/auth/.*?(whoami|login|logout|settings) { rewrite /auth/(.*) /$1 break; - proxy_pass http://{{ managerip }}:4433; + proxy_pass http://{{ manager_ip }}:4433; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -200,7 +230,7 @@ http { proxy_set_header X-Forwarded-Proto $scheme; } - {%- if ISAIRGAP is sameas true %} + {%- if airgap is sameas true %} location /repo/ { allow all; sendfile on; @@ -210,13 +240,12 @@ http { autoindex_format html; autoindex_localtime on; } - {%- endif %} location /grafana/ { auth_request /auth/sessions/whoami; rewrite /grafana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:3000/; + proxy_pass http://{{ manager_ip }}:3000/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -229,7 +258,7 @@ http { location /kibana/ { auth_request /auth/sessions/whoami; rewrite /kibana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:5601/; + proxy_pass http://{{ manager_ip }}:5601/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -240,7 +269,7 @@ http { } location /nodered/ { - proxy_pass http://{{ managerip }}:1880/; + proxy_pass http://{{ manager_ip }}:1880/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -253,7 +282,7 @@ http { } location /playbook/ { - 
proxy_pass http://{{ managerip }}:3200/playbook/; + proxy_pass http://{{ manager_ip }}:3200/playbook/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -263,13 +292,16 @@ http { proxy_set_header X-Forwarded-Proto $scheme; } - {%- if FLEET_NODE %} + {%- if fleet_node %} + location /fleet/ { - return 301 https://{{ FLEET_IP }}/fleet; + return 307 https://{{ fleet_ip }}/fleet; } - {%- else %} + + {%- else %} + location /fleet/ { - proxy_pass https://{{ managerip }}:8080; + proxy_pass https://{{ manager_ip }}:8080; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -278,10 +310,11 @@ http { proxy_set_header Proxy ""; proxy_set_header X-Forwarded-Proto $scheme; } - {%- endif %} + + {%- endif %} location /thehive/ { - proxy_pass http://{{ managerip }}:9000/thehive/; + proxy_pass http://{{ manager_ip }}:9000/thehive/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_http_version 1.1; # this is essential for chunked responses to work @@ -293,7 +326,7 @@ http { } location /cortex/ { - proxy_pass http://{{ managerip }}:9001/cortex/; + proxy_pass http://{{ manager_ip }}:9001/cortex/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_http_version 1.1; # this is essential for chunked responses to work @@ -305,7 +338,7 @@ http { } location /soctopus/ { - proxy_pass http://{{ managerip }}:7000/; + proxy_pass http://{{ manager_ip }}:7000/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -331,7 +364,7 @@ http { if ($http_authorization = "") { return 403; } - proxy_pass http://{{ managerip }}:9822/; + proxy_pass http://{{ manager_ip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; proxy_set_header Host $host; @@ -345,16 +378,12 @@ http { location @error401 { add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; - return 302 /auth/self-service/browser/flows/login; + return 302 /auth/self-service/login/browser; } - #error_page 404 /404.html; - # 
location = /40x.html { - #} - error_page 500 502 503 504 /50x.html; location = /usr/share/nginx/html/50x.html { } } - + {%- endif %} } diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval deleted file mode 100644 index cfc37a626..000000000 --- a/salt/nginx/etc/nginx.conf.so-eval +++ /dev/null @@ -1,361 +0,0 @@ -{%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} -{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} - -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - client_max_body_size 2500M; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - #server { - # listen 80 default_server; - # listen [::]:80 default_server; - # server_name _; - # root /opt/socore/html; - # index index.html; - - # Load configuration files for the default server block. 
- #include /etc/nginx/default.d/*.conf; - - # location / { - # } - - # error_page 404 /404.html; - # location = /40x.html { - # } - - # error_page 500 502 503 504 /50x.html; - # location = /50x.html { - # } - #} - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - {%- if ISAIRGAP is sameas true %} - server { - listen 7788; - server_name _; - root /opt/socore/html/repo; - location /rules/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - } - {%- endif %} - - -{% if FLEET_MANAGER %} - server { - listen 8090 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index blank.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ managerip }}:8080; - grpc_set_header Host $host; - grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_buffering off; - } - - } -{% endif %} - -# Settings for a TLS enabled server. - - server { - listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index index.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - # Load configuration files for the default server block. 
- #include /etc/nginx/default.d/*.conf; - - location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { - proxy_pass http://{{ managerip }}:9822; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location / { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location ~ ^/auth/.*?(whoami|login|logout|settings) { - rewrite /auth/(.*) /$1 break; - proxy_pass http://{{ managerip }}:4433; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cyberchef/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /navigator/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto 
$scheme; - } - - location /packages/ { - try_files $uri =206; - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if ISAIRGAP is sameas true %} - location /repo/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - - {%- endif %} - - location /grafana/ { - auth_request /auth/sessions/whoami; - rewrite /grafana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:3000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/ { - auth_request /auth/sessions/whoami; - rewrite /kibana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:5601/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /nodered/ { - proxy_pass http://{{ managerip }}:1880/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /playbook/ { - proxy_pass http://{{ managerip }}:3200/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - 
proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if FLEET_NODE %} - location /fleet/ { - return 301 https://{{ FLEET_IP }}/fleet; - } - {%- else %} - location /fleet/ { - proxy_pass https://{{ managerip }}:8080; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - {%- endif %} - - location /thehive/ { - proxy_pass http://{{ managerip }}:9000/thehive/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cortex/ { - proxy_pass http://{{ managerip }}:9001/cortex/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /soctopus/ { - proxy_pass http://{{ managerip }}:7000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/app/soc/ { - rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; - } - - location /kibana/app/fleet/ { - rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; - } - - 
location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } - - location /sensoroniagents/ { - if ($http_authorization = "") { - return 403; - } - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - error_page 401 = @error401; - - location @error401 { - add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; - return 302 /auth/self-service/browser/flows/login; - } - - #error_page 404 /404.html; - # location = /usr/share/nginx/html/40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -} diff --git a/salt/nginx/etc/nginx.conf.so-fleet b/salt/nginx/etc/nginx.conf.so-fleet deleted file mode 100644 index 937f09a5b..000000000 --- a/salt/nginx/etc/nginx.conf.so-fleet +++ /dev/null @@ -1,100 +0,0 @@ -{% set MAININT = salt['pillar.get']('host:mainint') %} -{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} - -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. 
-include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - include /etc/nginx/conf.d/*.conf; - - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - - server { - listen 8090 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index blank.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ MAINIP }}:8080; - grpc_set_header Host $host; - grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_buffering off; - } - - } - - - server { - listen 443 ssl http2 default_server; - server_name _; - root /opt/socore/html/packages; - index index.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location /fleet/ { - proxy_pass https://{{ MAINIP }}:8080; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - #error_page 404 /404.html; - # location = /40x.html { 
- #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -} diff --git a/salt/nginx/etc/nginx.conf.so-heavynode b/salt/nginx/etc/nginx.conf.so-heavynode deleted file mode 100644 index 7ec3fef7d..000000000 --- a/salt/nginx/etc/nginx.conf.so-heavynode +++ /dev/null @@ -1,89 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - root /usr/share/nginx/html; - - # Load configuration files for the default server block. - include /etc/nginx/default.d/*.conf; - - location / { - } - - error_page 404 /404.html; - location = /40x.html { - } - - error_page 500 502 503 504 /50x.html; - location = /50x.html { - } - } - -# Settings for a TLS enabled server. 
-# -# server { -# listen 443 ssl http2 default_server; -# listen [::]:443 ssl http2 default_server; -# server_name _; -# root /usr/share/nginx/html; -# -# ssl_certificate "/etc/pki/nginx/server.crt"; -# ssl_certificate_key "/etc/pki/nginx/private/server.key"; -# ssl_session_cache shared:SSL:1m; -# ssl_session_timeout 10m; -# ssl_ciphers HIGH:!aNULL:!MD5; -# ssl_prefer_server_ciphers on; -# -# # Load configuration files for the default server block. -# include /etc/nginx/default.d/*.conf; -# -# location / { -# } -# -# #error_page 404 /404.html; -# # location = /40x.html { -# #} -# -# error_page 500 502 503 504 /50x.html; -# location = /usr/share/nginx/html/50x.html { -# } -# } - -} diff --git a/salt/nginx/etc/nginx.conf.so-helix b/salt/nginx/etc/nginx.conf.so-helix deleted file mode 100644 index e5a68c09d..000000000 --- a/salt/nginx/etc/nginx.conf.so-helix +++ /dev/null @@ -1,89 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. 
- include /etc/nginx/conf.d/*.conf; - - server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - root /usr/share/nginx/html; - - # Load configuration files for the default server block. - include /etc/nginx/default.d/*.conf; - - location / { - } - - #error_page 404 /404.html; - # location = /40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -# Settings for a TLS enabled server. -# -# server { -# listen 443 ssl http2 default_server; -# listen [::]:443 ssl http2 default_server; -# server_name _; -# root /usr/share/nginx/html; -# -# ssl_certificate "/etc/pki/nginx/server.crt"; -# ssl_certificate_key "/etc/pki/nginx/private/server.key"; -# ssl_session_cache shared:SSL:1m; -# ssl_session_timeout 10m; -# ssl_ciphers HIGH:!aNULL:!MD5; -# ssl_prefer_server_ciphers on; -# -# # Load configuration files for the default server block. -# include /etc/nginx/default.d/*.conf; -# -# location / { -# } -# -# error_page 404 /404.html; -# location = /40x.html { -# } -# -# error_page 500 502 503 504 /50x.html; -# location = /50x.html { -# } -# } - -} diff --git a/salt/nginx/etc/nginx.conf.so-import b/salt/nginx/etc/nginx.conf.so-import deleted file mode 100644 index 1f180ad09..000000000 --- a/salt/nginx/etc/nginx.conf.so-import +++ /dev/null @@ -1,326 +0,0 @@ -{%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. 
-include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - client_max_body_size 2500M; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - #server { - # listen 80 default_server; - # listen [::]:80 default_server; - # server_name _; - # root /opt/socore/html; - # index index.html; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - # location / { - # } - - # error_page 404 /404.html; - # location = /40x.html { - # } - - # error_page 500 502 503 504 /50x.html; - # location = /50x.html { - # } - #} - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - -{% if FLEET_MANAGER %} - server { - listen 8090 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index blank.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ managerip }}:8080; - grpc_set_header Host $host; - grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_buffering off; - } - - } -{% endif %} - -# Settings for a TLS enabled server. 
- - server { - listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index index.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { - proxy_pass http://{{ managerip }}:9822; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location / { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location ~ ^/auth/.*?(whoami|login|logout|settings) { - rewrite /auth/(.*) /$1 break; - proxy_pass http://{{ managerip }}:4433; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cyberchef/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header 
X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /navigator/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /packages/ { - try_files $uri =206; - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /grafana/ { - auth_request /auth/sessions/whoami; - rewrite /grafana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:3000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/ { - auth_request /auth/sessions/whoami; - rewrite /kibana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:5601/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /nodered/ { - proxy_pass http://{{ managerip }}:1880/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Proxy 
""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /playbook/ { - proxy_pass http://{{ managerip }}:3200/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if FLEET_NODE %} - location /fleet/ { - return 301 https://{{ FLEET_IP }}/fleet; - } - {%- else %} - location /fleet/ { - proxy_pass https://{{ managerip }}:8080; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - {%- endif %} - - location /thehive/ { - proxy_pass http://{{ managerip }}:9000/thehive/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cortex/ { - proxy_pass http://{{ managerip }}:9001/cortex/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /soctopus/ { - proxy_pass http://{{ managerip }}:7000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - 
proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/app/soc/ { - rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; - } - - location /kibana/app/fleet/ { - rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; - } - - location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } - - location /sensoroniagents/ { - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - error_page 401 = @error401; - - location @error401 { - add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; - return 302 /auth/self-service/browser/flows/login; - } - - #error_page 404 /404.html; - # location = /usr/share/nginx/html/40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -} diff --git a/salt/nginx/etc/nginx.conf.so-manager b/salt/nginx/etc/nginx.conf.so-manager deleted file mode 100644 index 86122602c..000000000 --- a/salt/nginx/etc/nginx.conf.so-manager +++ /dev/null @@ -1,360 +0,0 @@ -{%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} -{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} - -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. 
-include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - client_max_body_size 2500M; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - #server { - # listen 80 default_server; - # listen [::]:80 default_server; - # server_name _; - # root /opt/socore/html; - # index index.html; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - # location / { - # } - - # error_page 404 /404.html; - # location = /40x.html { - # } - - # error_page 500 502 503 504 /50x.html; - # location = /50x.html { - # } - #} - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - {%- if ISAIRGAP is sameas true %} - server { - listen 7788; - server_name _; - root /opt/socore/html/repo; - location /rules/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - } - {%- endif %} - -{% if FLEET_MANAGER %} - server { - listen 8090 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index blank.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location ~ 
^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ managerip }}:8080; - grpc_set_header Host $host; - grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_buffering off; - } - - } -{% endif %} - -# Settings for a TLS enabled server. - - server { - listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index index.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { - proxy_pass http://{{ managerip }}:9822; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location / { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location ~ ^/auth/.*?(whoami|login|logout|settings) { - rewrite /auth/(.*) /$1 break; - proxy_pass http://{{ managerip }}:4433; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header 
X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cyberchef/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /navigator/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /packages/ { - try_files $uri =206; - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /grafana/ { - auth_request /auth/sessions/whoami; - rewrite /grafana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:3000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/ { - auth_request /auth/sessions/whoami; - rewrite /kibana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:5601/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /nodered/ { - 
proxy_pass http://{{ managerip }}:1880/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /playbook/ { - proxy_pass http://{{ managerip }}:3200/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if ISAIRGAP is sameas true %} - location /repo/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - - {%- endif %} - - {%- if FLEET_NODE %} - location /fleet/ { - return 301 https://{{ FLEET_IP }}/fleet; - } - {%- else %} - location /fleet/ { - proxy_pass https://{{ managerip }}:8080; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - {%- endif %} - - location /thehive/ { - proxy_pass http://{{ managerip }}:9000/thehive/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cortex/ { - proxy_pass http://{{ managerip }}:9001/cortex/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # 
this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /soctopus/ { - proxy_pass http://{{ managerip }}:7000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/app/soc/ { - rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; - } - - location /kibana/app/fleet/ { - rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; - } - - location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } - - location /sensoroniagents/ { - if ($http_authorization = "") { - return 403; - } - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - error_page 401 = @error401; - - location @error401 { - add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; - return 302 /auth/self-service/browser/flows/login; - } - - #error_page 404 /404.html; - # location = /40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -} diff --git a/salt/nginx/etc/nginx.conf.so-node b/salt/nginx/etc/nginx.conf.so-node deleted file mode 100644 index e5a68c09d..000000000 --- a/salt/nginx/etc/nginx.conf.so-node +++ /dev/null @@ -1,89 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: 
http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - root /usr/share/nginx/html; - - # Load configuration files for the default server block. - include /etc/nginx/default.d/*.conf; - - location / { - } - - #error_page 404 /404.html; - # location = /40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -# Settings for a TLS enabled server. -# -# server { -# listen 443 ssl http2 default_server; -# listen [::]:443 ssl http2 default_server; -# server_name _; -# root /usr/share/nginx/html; -# -# ssl_certificate "/etc/pki/nginx/server.crt"; -# ssl_certificate_key "/etc/pki/nginx/private/server.key"; -# ssl_session_cache shared:SSL:1m; -# ssl_session_timeout 10m; -# ssl_ciphers HIGH:!aNULL:!MD5; -# ssl_prefer_server_ciphers on; -# -# # Load configuration files for the default server block. 
-# include /etc/nginx/default.d/*.conf; -# -# location / { -# } -# -# error_page 404 /404.html; -# location = /40x.html { -# } -# -# error_page 500 502 503 504 /50x.html; -# location = /50x.html { -# } -# } - -} diff --git a/salt/nginx/etc/nginx.conf.so-sensor b/salt/nginx/etc/nginx.conf.so-sensor deleted file mode 100644 index e5a68c09d..000000000 --- a/salt/nginx/etc/nginx.conf.so-sensor +++ /dev/null @@ -1,89 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -user nginx; -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. -include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - root /usr/share/nginx/html; - - # Load configuration files for the default server block. - include /etc/nginx/default.d/*.conf; - - location / { - } - - #error_page 404 /404.html; - # location = /40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -# Settings for a TLS enabled server. 
-# -# server { -# listen 443 ssl http2 default_server; -# listen [::]:443 ssl http2 default_server; -# server_name _; -# root /usr/share/nginx/html; -# -# ssl_certificate "/etc/pki/nginx/server.crt"; -# ssl_certificate_key "/etc/pki/nginx/private/server.key"; -# ssl_session_cache shared:SSL:1m; -# ssl_session_timeout 10m; -# ssl_ciphers HIGH:!aNULL:!MD5; -# ssl_prefer_server_ciphers on; -# -# # Load configuration files for the default server block. -# include /etc/nginx/default.d/*.conf; -# -# location / { -# } -# -# error_page 404 /404.html; -# location = /40x.html { -# } -# -# error_page 500 502 503 504 /50x.html; -# location = /50x.html { -# } -# } - -} diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone deleted file mode 100644 index 35e1488ac..000000000 --- a/salt/nginx/etc/nginx.conf.so-standalone +++ /dev/null @@ -1,361 +0,0 @@ -{%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} -{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - -worker_processes auto; -error_log /var/log/nginx/error.log; -pid /run/nginx.pid; - -# Load dynamic modules. See /usr/share/nginx/README.dynamic. 
-include /usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - client_max_body_size 2500M; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /etc/nginx/conf.d/*.conf; - - #server { - # listen 80 default_server; - # listen [::]:80 default_server; - # server_name _; - # root /opt/socore/html; - # index index.html; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - # location / { - # } - - # error_page 404 /404.html; - # location = /40x.html { - # } - - # error_page 500 502 503 504 /50x.html; - # location = /50x.html { - # } - #} - server { - listen 80 default_server; - server_name _; - return 301 https://$host$request_uri; - } - {%- if ISAIRGAP is sameas true %} - server { - listen 7788; - server_name _; - root /opt/socore/html/repo; - location /rules/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - } - {%- endif %} - - -{% if FLEET_MANAGER %} - server { - listen 8090 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index blank.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - location ~ 
^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { - grpc_pass grpcs://{{ managerip }}:8080; - grpc_set_header Host $host; - grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_buffering off; - } - - } -{% endif %} - -# Settings for a TLS enabled server. - - server { - listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - server_name _; - root /opt/socore/html; - index index.html; - - ssl_certificate "/etc/pki/nginx/server.crt"; - ssl_certificate_key "/etc/pki/nginx/server.key"; - ssl_session_cache shared:SSL:1m; - ssl_session_timeout 10m; - ssl_ciphers HIGH:!aNULL:!MD5; - ssl_prefer_server_ciphers on; - - # Load configuration files for the default server block. - #include /etc/nginx/default.d/*.conf; - - location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { - proxy_pass http://{{ managerip }}:9822; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location / { - auth_request /auth/sessions/whoami; - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location ~ ^/auth/.*?(whoami|login|logout|settings) { - rewrite /auth/(.*) /$1 break; - proxy_pass http://{{ managerip }}:4433; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header 
X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cyberchef/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /navigator/ { - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /packages/ { - try_files $uri =206; - auth_request /auth/sessions/whoami; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if ISAIRGAP is sameas true %} - location /repo/ { - allow all; - sendfile on; - sendfile_max_chunk 1m; - autoindex on; - autoindex_exact_size off; - autoindex_format html; - autoindex_localtime on; - } - - {%- endif %} - - - location /grafana/ { - auth_request /auth/sessions/whoami; - rewrite /grafana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:3000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/ { - auth_request /auth/sessions/whoami; - rewrite /kibana/(.*) /$1 break; - proxy_pass http://{{ managerip }}:5601/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header 
Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /nodered/ { - proxy_pass http://{{ managerip }}:1880/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /playbook/ { - proxy_pass http://{{ managerip }}:3200/playbook/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - {%- if FLEET_NODE %} - location /fleet/ { - return 301 https://{{ FLEET_IP }}/fleet; - } - {%- else %} - location /fleet/ { - proxy_pass https://{{ managerip }}:8080; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - {%- endif %} - - location /thehive/ { - proxy_pass http://{{ managerip }}:9000/thehive/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cortex/ { - proxy_pass http://{{ managerip }}:9001/cortex/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this 
is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /soctopus/ { - proxy_pass http://{{ managerip }}:7000/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /kibana/app/soc/ { - rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent; - } - - location /kibana/app/fleet/ { - rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; - } - - location /kibana/app/soctopus/ { - rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; - } - - location /sensoroniagents/ { - if ($http_authorization = "") { - return 403; - } - proxy_pass http://{{ managerip }}:9822/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - error_page 401 = @error401; - - location @error401 { - add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; - return 302 /auth/self-service/browser/flows/login; - } - - #error_page 404 /404.html; - # location = /40x.html { - #} - - error_page 500 502 503 504 /50x.html; - location = /usr/share/nginx/html/50x.html { - } - } - -} diff --git a/salt/nginx/files/nav_layer_playbook.json b/salt/nginx/files/nav_layer_playbook.json index 7b7f39098..69db796e8 100644 --- a/salt/nginx/files/nav_layer_playbook.json +++ b/salt/nginx/files/nav_layer_playbook.json @@ -1,6 +1,6 @@ { "name": "Playbook", - "version": "2.2", + "version": "3.0", "domain": "mitre-enterprise", "description": "Current Coverage of 
Playbook", "filters": { @@ -13,16 +13,15 @@ }, "sorting": 0, "viewMode": 0, - "hideDisabled": "false", - "techniques": [{ - }], + "hideDisabled": false, + "techniques": [], "gradient": { "colors": ["#ff6666", "#ffe766", "#8ec843"], "minValue": 0, "maxValue": 100 }, "metadata": [], - "showTacticRowBackground": "false", + "showTacticRowBackground": false, "tacticRowBackground": "#dddddd", - "selectTechniquesAcrossTactics": "true" + "selectTechniquesAcrossTactics": true } diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls index a3f6f33fd..8d6dd46f7 100644 --- a/salt/nginx/init.sls +++ b/salt/nginx/init.sls @@ -31,7 +31,7 @@ nginxconf: - user: 939 - group: 939 - template: jinja - - source: salt://nginx/etc/nginx.conf.{{ grains.role }} + - source: salt://nginx/etc/nginx.conf nginxlogdir: file.directory: @@ -98,6 +98,11 @@ so-nginx: - file: nginxconf - file: nginxconfdir +append_so-nginx_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-nginx + {% else %} nginx_state_not_allowed: diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows index 78bab818a..3d6ed2a8c 100644 --- a/salt/nodered/files/nodered_load_flows +++ b/salt/nodered/files/nodered_load_flows @@ -3,10 +3,10 @@ default_salt_dir=/opt/so/saltstack/default echo "Waiting for connection" -until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do +until $(curl --output /dev/null --silent --head -L http://{{ ip }}:1880); do echo '.' sleep 1 done echo "Loading flows..." -curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows +curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json -L {{ ip }}:1880/flows echo "Done loading..." 
diff --git a/salt/nodered/init.sls b/salt/nodered/init.sls index ac886a6b7..c4fb8cb37 100644 --- a/salt/nodered/init.sls +++ b/salt/nodered/init.sls @@ -52,8 +52,8 @@ noderedflowsload: file.managed: - name: /usr/sbin/so-nodered-load-flows - source: salt://nodered/files/nodered_load_flows - - user: 0 - - group: 0 + - user: root + - group: root - mode: 755 - template: jinja @@ -67,13 +67,18 @@ noderedlog: so-nodered: docker_container.running: - - image: {{ IMAGEREPO }}/so-nodered:HH1.2.2 + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-nodered:{{ VERSION }} - interactive: True - binds: - /opt/so/conf/nodered/:/data:rw - port_bindings: - 0.0.0.0:1880:1880 +append_so-nodered_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-nodered + so-nodered-flows: cmd.run: - name: /usr/sbin/so-nodered-load-flows diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json index 4fd31b96d..8a9027bd0 100644 --- a/salt/pcap/files/sensoroni.json +++ b/salt/pcap/files/sensoroni.json @@ -1,4 +1,4 @@ -{%- set MANAGER = salt['grains.get']('master') -%} +{%- set URLBASE = salt['pillar.get']('global:url_base') %} {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} {%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%} { @@ -6,7 +6,7 @@ "logLevel":"info", "agent": { "pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }}, - "serverUrl": "https://{{ MANAGER }}/sensoroniagents", + "serverUrl": "https://{{ URLBASE }}/sensoroniagents", "verifyCert": false, "modules": { "importer": {}, diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index ade70d718..5a13c1231 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -152,6 +152,24 @@ so-steno: - watch: - file: /opt/so/conf/steno/config +append_so-steno_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-steno + - unless: grep so-steno 
/opt/so/conf/so-status/so-status.conf + + {% if STENOOPTIONS.status == 'running' %} +delete_so-steno_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-steno$ + {% elif STENOOPTIONS.status == 'stopped' %} +so-steno_so-status.disabled: + file.comment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-steno$ + {% endif %} + so-sensoroni: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }} @@ -166,6 +184,11 @@ so-sensoroni: - watch: - file: /opt/so/conf/sensoroni/sensoroni.json +append_so-sensoroni_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-sensoroni + {% else %} pcap_state_not_allowed: diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls index c78743eb5..dca898eec 100644 --- a/salt/playbook/init.sls +++ b/salt/playbook/init.sls @@ -10,25 +10,26 @@ {% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} {%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db', None) -%} +{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %} + include: - mysql create_playbookdbuser: - module.run: - - mysql.user_create: - - user: playbookdbuser - - password: {{ PLAYBOOKPASS }} - - host: 172.17.0.0/255.255.0.0 - - connection_host: {{ MAINIP }} - - connection_port: 3306 - - connection_user: root - - connection_pass: {{ MYSQLPASS }} + mysql_user.present: + - name: playbookdbuser + - password: {{ PLAYBOOKPASS }} + - host: {{ DNET }}/255.255.255.0 + - connection_host: {{ MAINIP }} + - connection_port: 3306 + - connection_user: root + - connection_pass: {{ MYSQLPASS }} query_playbookdbuser_grants: mysql_query.run: - database: playbook - - query: "GRANT ALL ON playbook.* TO 
'playbookdbuser'@'172.17.0.0/255.255.0.0';" + - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DNET }}/255.255.255.0';" - connection_host: {{ MAINIP }} - connection_port: 3306 - connection_user: root @@ -91,6 +92,11 @@ so-playbook: - port_bindings: - 0.0.0.0:3200:3000 +append_so-playbook_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-playbook + {% endif %} so-playbooksynccron: diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls index a32fb5cfd..a4226b027 100644 --- a/salt/reactor/fleet.sls +++ b/salt/reactor/fleet.sls @@ -31,16 +31,17 @@ def run(): print(line) # Update the enroll secret in the secrets pillar - for line in fileinput.input(SECRETSFILE, inplace=True): - line = re.sub(r'fleet_enroll-secret: \S*', f"fleet_enroll-secret: {ESECRET}", line.rstrip()) - print(line) + if ESECRET != "": + for line in fileinput.input(SECRETSFILE, inplace=True): + line = re.sub(r'fleet_enroll-secret: \S*', f"fleet_enroll-secret: {ESECRET}", line.rstrip()) + print(line) - # Update the Fleet host in the static pillar + # Update the Fleet host in the static pillar for line in fileinput.input(STATICFILE, inplace=True): line = re.sub(r'fleet_hostname: \S*', f"fleet_hostname: '{HOSTNAME}'", line.rstrip()) print(line) - # Update the Fleet IP in the static pillar + # Update the Fleet IP in the static pillar for line in fileinput.input(STATICFILE, inplace=True): line = re.sub(r'fleet_ip: \S*', f"fleet_ip: '{MAINIP}'", line.rstrip()) print(line) diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 1b7611eab..57f189865 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -70,6 +70,11 @@ so-redis: - watch: - file: /opt/so/conf/redis/etc +append_so-redis_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-redis + {% else %} redis_state_not_allowed: diff --git a/salt/registry/init.sls b/salt/registry/init.sls index c98577ca2..43b9d8fa6 100644 --- a/salt/registry/init.sls +++ 
b/salt/registry/init.sls @@ -45,7 +45,7 @@ dockerregistryconf: # Install the registry container so-dockerregistry: docker_container.running: - - image: registry:latest + - image: ghcr.io/security-onion-solutions/registry:latest - hostname: so-registry - restart_policy: always - port_bindings: @@ -57,6 +57,11 @@ so-dockerregistry: - /etc/pki/registry.crt:/etc/pki/registry.crt:ro - /etc/pki/registry.key:/etc/pki/registry.key:ro +append_so-dockerregistry_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-dockerregistry + {% else %} registry_state_not_allowed: diff --git a/salt/salt/lasthighstate.sls b/salt/salt/lasthighstate.sls new file mode 100644 index 000000000..606bd1082 --- /dev/null +++ b/salt/salt/lasthighstate.sls @@ -0,0 +1,4 @@ +lasthighstate: + file.touch: + - name: /opt/so/log/salt/lasthighstate + - order: last \ No newline at end of file diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 89ceadd5b..7ef63bd68 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -1,5 +1,14 @@ -{% import_yaml 'salt/minion.defaults.yaml' as salt %} -{% set SALTVERSION = salt.salt.minion.version %} +{% import_yaml 'salt/minion.defaults.yaml' as saltminion %} +{% set SALTVERSION = saltminion.salt.minion.version %} + +{% if grains.os == 'Ubuntu' %} + {% set SPLITCHAR = '+' %} +{% else %} + {% set SPLITCHAR = '-' %} +{% endif %} + +{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %} +{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} {% if grains.os|lower == 'ubuntu' %} {% set COMMON = 'salt-common' %} @@ -9,10 +18,14 @@ {% if grains.saltversion|string != SALTVERSION|string %} {% if grains.os|lower in ['centos', 'redhat'] %} - {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% if ISAIRGAP is sameas true %} + {% set UPGRADECOMMAND = 'yum clean all && yum versionlock delete "salt-*" && 
/usr/sbin/bootstrap-salt.sh -X -s 120 -r -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %} + {% else %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %} + {% endif %} {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && apt-mark hold salt-common && apt-mark hold salt-minion' %} {% endif %} {% else %} - {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} {% endif %} \ No newline at end of file diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 8694ffbc7..e774a2c7d 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: master: - version: 3001.1 \ No newline at end of file + version: 3002.2 \ No newline at end of file diff --git a/salt/salt/minion-check.sls b/salt/salt/minion-check.sls new file mode 100644 index 000000000..e8a0c2639 --- /dev/null +++ b/salt/salt/minion-check.sls @@ -0,0 +1,19 @@ +include: + - salt.minion-state-apply-test + +state-apply-test: + schedule.present: + - name: salt-minion-state-apply-test + - function: state.sls + - job_args: + - salt.minion-state-apply-test + - minutes: 5 + - splay: + start: 0 + end: 180 + +/usr/sbin/so-salt-minion-check -q: + cron.present: + - identifier: so-salt-minion-check + - user: root + - minute: '*/5' \ No newline 
at end of file diff --git a/salt/salt/minion-state-apply-test.sls b/salt/salt/minion-state-apply-test.sls new file mode 100644 index 000000000..9d7e90e63 --- /dev/null +++ b/salt/salt/minion-state-apply-test.sls @@ -0,0 +1,4 @@ +minion-state-apply-test: + file.touch: + - name: /opt/so/log/salt/state-apply-test + - order: first \ No newline at end of file diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 31c313df6..baaaff411 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -2,4 +2,5 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: minion: - version: 3001.1 \ No newline at end of file + version: 3002.2 + check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default \ No newline at end of file diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index b2d3a2913..de85693c6 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,17 +1,19 @@ {% from 'salt/map.jinja' import COMMON with context %} {% from 'salt/map.jinja' import UPGRADECOMMAND with context %} +{% from 'salt/map.jinja' import SALTVERSION %} +{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %} include: - salt install_salt_minion: cmd.run: - - name: {{ UPGRADECOMMAND }} - -#versionlock_salt_minion: -# module.run: -# - pkg.hold: -# - name: "salt-*" + - name: | + exec 0>&- # close stdin + exec 1>&- # close stdout + exec 2>&- # close stderr + nohup /bin/sh -c '{{ UPGRADECOMMAND }}' & + - onlyif: test "{{INSTALLEDSALTVERSION}}" != "{{SALTVERSION}}" salt_minion_package: pkg.installed: @@ -19,8 +21,10 @@ salt_minion_package: - {{ COMMON }} - salt-minion - hold: True + - onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}" salt_minion_service: service.running: - 
name: salt-minion - enable: True + - onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}" \ No newline at end of file diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml index 928e744d0..c26aeec3f 100644 --- a/salt/soc/files/kratos/kratos.yaml +++ b/salt/soc/files/kratos/kratos.yaml @@ -2,7 +2,7 @@ {%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%} selfservice: - strategies: + methods: password: enabled: true diff --git a/salt/soc/files/soc/alerts.actions.json b/salt/soc/files/soc/alerts.actions.json index 5924750a4..b825c0131 100644 --- a/salt/soc/files/soc/alerts.actions.json +++ b/salt/soc/files/soc/alerts.actions.json @@ -1,6 +1,6 @@ [ - { "name": "", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" }, - { "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, - { "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, - { "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } + { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" }, + { "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, + { "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, + { "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } ] \ No newline at end of file diff --git 
a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index 5aa9b220b..90f71f940 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -1,13 +1,42 @@ { - "title": "Security Onion 2.3.1 is here!", + "title": "Security Onion 2.3.10 is here!", "changes": [ - { "summary": "Fixed a SOC issue in airgap mode that was preventing people from logging in." }, - { "summary": "Downloading Elastic features images will now download the correct images." }, - { "summary": "Winlogbeat download no longer requires Internet access." }, - { "summary": "Adjusted Alerts quick action bar to allow searching for a specific value while remaining in Alerts view." }, - { "summary": "/nsm will properly display disk usage on the standalone Grafana dashboard." }, - { "summary": "The manager node now has syslog listener enabled by default (you'll still need to allow syslog traffic through the firewall of course)." }, - { "summary": "Fixed an issue when creating host groups with so-firewall." }, - { "summary": "Known Issues
  • It is still possible to update your grid from any release candidate to 2.3. However, if you have a true production deployment, then we recommend a fresh image and install for best results.
  • In 2.3.0 we made some changes to data types in the elastic index templates. This will cause some errors in Kibana around field conflicts. You can address this in 2 ways:
    1. Delete all the data on the ES nodes preserving all of your other settings suchs as BPFs by running sudo so-elastic-clear on all the search nodes
    2. Re-Index the data. This is not a quick process but you can find more information at https://docs.securityonion.net/en/2.3/elasticsearch.html#re-indexing
  • Please be patient as we update our documentation. We have made a concerted effort to update as much as possible but some things still may be incorrect or ommited. If you have questions or feedback, please start a discussion at https://securityonion.net/discuss.
  • Once you update your grid to 2.3.0, any new nodes that join the grid must be 2.3.0. For example, if you try to join a new RC1 node it will fail. For best results, use the latest ISO (or 2.3.0 installer from github) when joining to an 2.3.0 grid.
  • Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.
  • When running soup to upgrade from RC1/RC2/RC3 to 2.3.0, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates.
  • When Search Nodes are upgraded from RC1 to 2.3.0, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this run the following on the search node that is having the issue:
    1. Stop elasticsearch - sudo so-elasticsearch-stop
    2. Run the SSL state - sudo salt-call state.apply ssl
    3. Restart elasticsearch - sudo so-elasticsearch-restart
  • If you are upgrading from RC1 you might see errors around registry:2 missing. This error does not break the actual upgrade. To fix, run the following on the manager:
    1. Stop the Docker registry - sudo docker stop so-dockerregistry
    2. Remove the container - sudo docker rm so-dockerregistry
    3. Run the registry state - sudo salt-call state.apply registry
" } + { "summary": "UEFI installs with multiple disks should work as intended now." }, + { "summary": "Telegraf scripts will now make sure they are not already running before execution." }, + { "summary": "You are now prompted during setup if you want to change the docker IP range. If you change this it needs to be the same on all nodes in the grid." }, + { "summary": "Soup will now download the new containers before stopping anything. If anything fails it will now exit and leave the grid at the current version." }, + { "summary": "All containers are now hosted on quay.io to prevent pull limitations. We are now using GPG keys to determine if the image is from Security Onion." }, + { "summary": "Osquery installers have been updated to osquery 4.5.1." }, + { "summary": "Fix for bug where Playbook was not removing the Elastalert rules for inactive Plays." }, + { "summary": "Exifdata reported by Strelka is now constrained to a single multi-valued field to prevent mapping explosion (scan.exiftool)." }, + { "summary": "Resolved issue with Navigator layer(s) not loading correctly." }, + { "summary": "Wazuh authd is now started by default on port 1515/tcp." }, + { "summary": "Wazuh API default credentials are now removed after setup. Scripts have been added for API user management." }, + { "summary": "Upgraded Salt to 3002.2 due to CVEs." }, + { "summary": "If salt-minion is unable to apply states after the defined threshold, we assume salt-minion is in a bad state and the salt-minion service will be restarted." }, + { "summary": "Fixed bug that prevented mysql from installing for Fleet if Playbook wasn't also installed." 
}, + { "summary": "so-status will now show STARTING or WAIT_START, instead of ERROR, if so-status is run before a salt highstate has started or finished for the first time after system startup" }, + { "summary": "Stenographer can now be disabled on a sensor node by setting the pillar steno:enabled:false in its minion.sls file or globally if set in the global.sls file" }, + { "summary": "Added so-ssh-harden script that runs the commands listed in https://docs.securityonion.net/en/2.3/ssh.html" }, + { "summary": "NGINX now redirects the browser to the hostname/IP address/FQDN based on global:url_base" }, + { "summary": "MySQL state now waits for MySQL server to respond to a query before completing" }, + { "summary": "Added Analyst option to network installs" }, + { "summary": "Acknowledging (and Escalating) alerts did not consistently remove the alert from the visible list; this has been corrected." }, + { "summary": "Escalating alerts that have a rule.case_template field defined will automatically assign that case template to the case generated in TheHive." }, + { "summary": "Alerts and Hunt interface quick action bar has been converted into a vertical menu to improve quick action option clarity. Related changes also eliminated the issues that occurred when the quick action bar was appearing to the left of the visible browser area." }, + { "summary": "Updated Go to newer version to fix a timezone, daylight savings time (DST) issue that resulted in Alerts and Hunt interfaces not consistently showing results." }, + { "summary": "Improved Hunt and Alert table sorting." }, + { "summary": "Alerts interface now allows absolute time searches." }, + { "summary": "Alerts interface 'Hunt' quick action is now working as intended." }, + { "summary": "Alerts interface 'Ack' icon tooltip has been changed from 'Dismiss' to 'Acknowledge' for consistency." 
}, + { "summary": "Hunt interface bar charts will now show the quick action menu when clicked instead of assuming the click was intended to add an include filter." }, + { "summary": "Hunt interface quick action will now cast a wider net on field searches." }, + { "summary": "Now explicitly preventing the use of a dollar sign ($) character in web user passwords during setup." }, + { "summary": "Cortex container will now restart properly if the SO host was not gracefully shutdown." }, + { "summary": "Added syslog plugin to the logstash container; this is not in-use by default but available for those users that choose to use it." }, + { "summary": "Winlogbeat download package is now available from the SOC Downloads interface." }, + { "summary": "Upgraded Kratos authentication system." }, + { "summary": "Added new Reset Defaults button to the SOC Profile Settings interface which allows users to reset all local browser SOC customizations back to their defaults. This includes things like default sort column, sort order, items per page, etc." }, + { "summary": "Known Issues
  • Following the Salt minion upgrade on remote nodes, the salt-minion service may not restart properly. If this occurs, you can ssh to the minion and run sudo systemctl restart salt-minion. If you do not want to connect to each node and manually restart the salt-minion, the new salt-minion watch process will restart it automatically after 1 hour.
  • During soup, you may see the following during the first highstate run, it can be ignored: Rendering SLS '' failed: Jinja variable 'list object' has no attribute 'values'. The second highstate will complete without that error.
" } ] } diff --git a/salt/soc/files/soc/hunt.actions.json b/salt/soc/files/soc/hunt.actions.json index 82f9731ed..b825c0131 100644 --- a/salt/soc/files/soc/hunt.actions.json +++ b/salt/soc/files/soc/hunt.actions.json @@ -1,5 +1,6 @@ [ - { "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, - { "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, - { "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } + { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" }, + { "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" }, + { "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" }, + { "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" } ] \ No newline at end of file diff --git a/salt/soc/files/soc/hunt.eventfields.json b/salt/soc/files/soc/hunt.eventfields.json index f7cfb53e3..e8af03a5c 100644 --- a/salt/soc/files/soc/hunt.eventfields.json +++ b/salt/soc/files/soc/hunt.eventfields.json @@ -37,7 +37,7 @@ "::firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.direction", "interface.name", "rule.action", "rule.reason", "network.community_id" ], ":osquery:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], ":ossec:": ["soc_timestamp", 
"source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location" ], - ":strelka:file": ["soc_timestamp", "scan.exiftool.OriginalFileName", "file.size", "hash.md5", "scan.exiftool.CompanyName", "scan.exiftool.Description", "scan.exiftool.Directory", "scan.exiftool.FileType", "scan.exiftool.FileOS", "log.id.fuid" ], + ":strelka:file": ["soc_timestamp", "file.name", "file.size", "hash.md5", "file.source", "file.mime_type", "log.id.fuid" ], ":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.category", "event.severity_label", "log.id.uid", "network.community_id" ], ":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], ":windows_eventlog:": ["soc_timestamp", "user.name" ] diff --git a/salt/soc/files/soc/hunt.queries.json b/salt/soc/files/soc/hunt.queries.json index f2c3a633a..57027dc0f 100644 --- a/salt/soc/files/soc/hunt.queries.json +++ b/salt/soc/files/soc/hunt.queries.json @@ -10,7 +10,7 @@ { "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword"}, { "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event type", "query": "event.module:sysmon | groupby event.dataset"}, { "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event.module:sysmon | groupby event.dataset, user.name.keyword"}, - { "name": "Strelka", "description": "Show all Strelka logs grouped by file type", "query": "event.module:strelka | groupby scan.exiftool.FileType"}, + { "name": "Strelka", "description": "Show all Strelka logs grouped by file type", "query": "event.module:strelka | groupby file.mime_type"}, { "name": "Zeek Notice", 
"description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"}, { "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"}, { "name": "Connections", "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"}, diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 61c4ab6bb..5bb348309 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -9,6 +9,8 @@ {%- import_json "soc/files/soc/hunt.queries.json" as hunt_queries %} {%- import_json "soc/files/soc/hunt.actions.json" as hunt_actions %} {%- import_json "soc/files/soc/hunt.eventfields.json" as hunt_eventfields %} +{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %} + { "logFilename": "/opt/sensoroni/logs/sensoroni-server.log", "server": { @@ -33,7 +35,7 @@ {%- if salt['pillar.get']('nodestab', {}) %} "remoteHostUrls": [ {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - "https://{{ SN.split('_')|first }}:9200"{{ "," if not loop.last }} + "https://{{ SN.split('_')|first }}:9200"{{ "," if not loop.last else ""}} {%- endfor %} ], {%- endif %} @@ -49,15 +51,17 @@ }, {% endif %} "statickeyauth": { - "anonymousCidr": "172.17.0.0/24", + "anonymousCidr": "{{ DNET }}/24", "apiKey": "{{ SENSORONIKEY }}" } }, "client": { {%- if ISAIRGAP is sameas true %} "docsUrl": "/docs/", + "cheatsheetUrl": "/docs/cheatsheet.pdf", {%- else %} "docsUrl": "https://docs.securityonion.net/en/2.3/", + "cheatsheetUrl": "https://github.com/Security-Onion-Solutions/securityonion-docs/raw/2.3/images/cheat-sheet/Security-Onion-Cheat-Sheet.pdf", {%- endif %} "hunt": { "advanced": true, @@ -68,7 +72,7 @@ "relativeTimeValue": 24, "relativeTimeUnit": 30, "mostRecentlyUsedLimit": 5, - "dismissEnabled": false, + "ackEnabled": false, 
"escalateEnabled": {{ 'true' if THEHIVEKEY != '' else 'false' }}, "eventFields": {{ hunt_eventfields | json }}, "queryBaseFilter": "", @@ -85,7 +89,7 @@ "relativeTimeValue": 24, "relativeTimeUnit": 30, "mostRecentlyUsedLimit": 5, - "dismissEnabled": true, + "ackEnabled": true, "escalateEnabled": {{ 'true' if THEHIVEKEY != '' else 'false' }}, "eventFields": {{ alerts_eventfields | json }}, "queryBaseFilter": "event.dataset:alert", diff --git a/salt/soc/init.sls b/salt/soc/init.sls index 012dae330..cc8aee048 100644 --- a/salt/soc/init.sls +++ b/salt/soc/init.sls @@ -67,6 +67,11 @@ so-soc: - watch: - file: /opt/so/conf/soc/* +append_so-soc_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-soc + # Add Kratos Group kratosgroup: group.present: @@ -119,6 +124,11 @@ so-kratos: - watch: - file: /opt/so/conf/kratos +append_so-kratos_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kratos + {% else %} soc_state_not_allowed: diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index 1b2e5fd3d..29f31f95f 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,4 +1,5 @@ {%- set MANAGER = salt['pillar.get']('global:url_base', '') %} +{%- set URLBASE = salt['pillar.get']('global:url_base', '') %} {%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} {%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} {%- set PLAYBOOK_KEY = salt['pillar.get']('playbook:api_key', '') %} @@ -14,7 +15,7 @@ es_verifycert = no [cortex] auto_analyze_alerts = no -cortex_url = https://{{MANAGER}}/cortex/ +cortex_url = https://{{URLBASE}}/cortex/ cortex_key = {{ CORTEXKEY }} supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS @@ -35,7 +36,7 @@ grr_user = YOURGRRUSER grr_pass = YOURGRRPASS [hive] -hive_url = https://{{MANAGER}}/thehive/ +hive_url = https://{{URLBASE}}/thehive/ hive_key = {{ HIVEKEY }} hive_tlp = 3 
hive_verifycert = no @@ -66,7 +67,7 @@ soc_url = http://{{MANAGER}}:9822 [playbook] playbook_url = http://{{MANAGER}}:3200/playbook -playbook_ext_url = https://{{MANAGER}}/playbook +playbook_ext_url = https://{{URLBASE}}/playbook playbook_key = {{ PLAYBOOK_KEY }} playbook_verifycert = no playbook_unit_test_index = playbook-testing diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 2c9e721ac..2137a4511 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -73,6 +73,11 @@ so-soctopus: - extra_hosts: - {{MANAGER_URL}}:{{MANAGER_IP}} +append_so-soctopus_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-soctopus + {% else %} soctopus_state_not_allowed: diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls index dabd58fe5..8748cbe50 100644 --- a/salt/strelka/init.sls +++ b/salt/strelka/init.sls @@ -87,6 +87,11 @@ strelka_coordinator: - port_bindings: - 0.0.0.0:6380:6379 +append_so-strelka-coordinator_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-coordinator + strelka_gatekeeper: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }} @@ -95,6 +100,11 @@ strelka_gatekeeper: - port_bindings: - 0.0.0.0:6381:6379 +append_so-strelka-gatekeeper_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-gatekeeper + strelka_frontend: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-frontend:{{ VERSION }} @@ -107,6 +117,11 @@ strelka_frontend: - port_bindings: - 0.0.0.0:57314:57314 +append_so-strelka-frontend_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-frontend + strelka_backend: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-backend:{{ VERSION }} @@ -117,6 +132,11 @@ strelka_backend: - command: strelka-backend - restart_policy: on-failure 
+append_so-strelka-backend_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-backend + strelka_manager: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-manager:{{ VERSION }} @@ -125,6 +145,11 @@ strelka_manager: - name: so-strelka-manager - command: strelka-manager +append_so-strelka-manager_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-manager + strelka_filestream: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-filestream:{{ VERSION }} @@ -133,6 +158,11 @@ strelka_filestream: - /nsm/strelka:/nsm/strelka - name: so-strelka-filestream - command: strelka-filestream + +append_so-strelka-filestream_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-strelka-filestream strelka_zeek_extracted_sync: cron.present: diff --git a/salt/suricata/afpacket.map.jinja b/salt/suricata/afpacket.map.jinja index 37b80aa87..a6c390abb 100644 --- a/salt/suricata/afpacket.map.jinja +++ b/salt/suricata/afpacket.map.jinja @@ -7,9 +7,9 @@ af-packet: use-mmap: yes threads: {{ salt['pillar.get']('sensor:suriprocs', salt['pillar.get']('sensor:suripins') | length) }} tpacket-v3: yes - ring-size: {{ salt['pillar.get']('sensor:suriringsize', '2048') }} + ring-size: {{ salt['pillar.get']('sensor:suriringsize', '5000') }} - interface: default #threads: auto #use-mmap: no #tpacket-v3: yes -{% endload %} \ No newline at end of file +{% endload %} diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 6245b9e51..0c50bb5d1 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -163,6 +163,11 @@ so-suricata: - file: /opt/so/conf/suricata/rules/ - file: /opt/so/conf/suricata/bpf +append_so-suricata_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-suricata + surilogrotate: file.managed: - name: /opt/so/conf/suricata/suri-rotate.conf 
diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls index 0bbf131f7..8d400ca1e 100644 --- a/salt/telegraf/init.sls +++ b/salt/telegraf/init.sls @@ -26,7 +26,7 @@ tgrafetsdir: tgrafsyncscripts: file.recurse: - name: /opt/so/conf/telegraf/scripts - - user: 0 + - user: root - group: 939 - file_mode: 700 - template: jinja @@ -73,6 +73,11 @@ so-telegraf: - file: tgrafconf - file: tgrafsyncscripts +append_so-telegraf_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-telegraf + {% else %} telegraf_state_not_allowed: diff --git a/salt/telegraf/scripts/checkfiles.sh b/salt/telegraf/scripts/checkfiles.sh index a22735696..4b6a8493a 100644 --- a/salt/telegraf/scripts/checkfiles.sh +++ b/salt/telegraf/scripts/checkfiles.sh @@ -1,4 +1,28 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=checkfiles +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf FILES=$(ls -1x /host/nsm/faf/complete/ | wc -l) diff --git a/salt/telegraf/scripts/helixeps.sh b/salt/telegraf/scripts/helixeps.sh index eee4f65c3..aed559932 100644 --- a/salt/telegraf/scripts/helixeps.sh +++ b/salt/telegraf/scripts/helixeps.sh @@ -1,4 +1,28 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=helixeps +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf PREVCOUNTFILE='/tmp/helixevents.txt' EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.pipelines.helix.events.out')" diff --git a/salt/telegraf/scripts/influxdbsize.sh b/salt/telegraf/scripts/influxdbsize.sh index 7060942ae..4e74c4cf5 100644 --- a/salt/telegraf/scripts/influxdbsize.sh +++ b/salt/telegraf/scripts/influxdbsize.sh @@ -1,4 +1,28 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=influxsize +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'}) diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh index 970c47589..b8d383112 100644 --- a/salt/telegraf/scripts/oldpcap.sh +++ b/salt/telegraf/scripts/oldpcap.sh @@ -1,4 +1,28 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=oldpcap +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf # Get the data OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." 
| head -n 1 | awk {'print $2'}) diff --git a/salt/telegraf/scripts/redis.sh b/salt/telegraf/scripts/redis.sh index a91e1f2dc..9f5dbd37f 100644 --- a/salt/telegraf/scripts/redis.sh +++ b/salt/telegraf/scripts/redis.sh @@ -1,4 +1,29 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +APP=redis +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}') PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}') diff --git a/salt/telegraf/scripts/stenoloss.sh b/salt/telegraf/scripts/stenoloss.sh index 1b60f0517..d078284a4 100644 --- a/salt/telegraf/scripts/stenoloss.sh +++ b/salt/telegraf/scripts/stenoloss.sh @@ -1,4 +1,29 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +APP=stenoloss +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf # Get the data DROP=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop | awk '{print $14}' | awk -F "=" '{print $2}') diff --git a/salt/telegraf/scripts/suriloss.sh b/salt/telegraf/scripts/suriloss.sh index 48745c161..cc2cff94c 100644 --- a/salt/telegraf/scripts/suriloss.sh +++ b/salt/telegraf/scripts/suriloss.sh @@ -1,4 +1,29 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +APP=suriloss +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4) CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l) diff --git a/salt/telegraf/scripts/zeekcaptureloss.sh b/salt/telegraf/scripts/zeekcaptureloss.sh index a2e350212..36962e109 100644 --- a/salt/telegraf/scripts/zeekcaptureloss.sh +++ b/salt/telegraf/scripts/zeekcaptureloss.sh @@ -1,6 +1,32 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + # This script returns the average of all the workers average capture loss to telegraf / influxdb in influx format include nanosecond precision timestamp +APP=zeekcaploss +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + if [ -d "/host/nsm/zeek/spool/logger" ]; then WORKERS={{ salt['pillar.get']('sensor:zeek_lbprocs', salt['pillar.get']('sensor:zeek_pins') | length) }} ZEEKLOG=/host/nsm/zeek/spool/logger/capture_loss.log @@ -23,4 +49,4 @@ if [ -f "$ZEEKLOG" ]; then fi fi echo "$CURRENTTS" > $LASTCAPTURELOSSLOG -fi \ No newline at end of file +fi diff --git a/salt/telegraf/scripts/zeekloss.sh b/salt/telegraf/scripts/zeekloss.sh index 579fdf9f2..9a64ef4dd 100644 --- a/salt/telegraf/scripts/zeekloss.sh +++ b/salt/telegraf/scripts/zeekloss.sh @@ -1,5 +1,31 @@ #!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + # This script returns the packets dropped by Zeek, but it isn't a percentage. $LOSS * 100 would be the percentage + +APP=zeekloss +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2) declare RESULT=($ZEEKLOG) CURRENTDROP=${RESULT[3]} @@ -14,4 +40,4 @@ else TOTAL=$((CURRENTPACKETS - PASTPACKETS)) LOSS=$(echo $DROPPED $TOTAL / p | dc) echo "zeekdrop drop=$LOSS" -fi \ No newline at end of file +fi diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf index d84566068..88bea88df 100644 --- a/salt/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -6,6 +6,7 @@ # WARNING: If you deploy your application on several servers, make sure to use the same key. play.http.secret.key="{{ CORTEXPLAYSECRET }}" play.http.context=/cortex/ +pidfile.path = "/dev/null" search.uri = "http://{{ MANAGERIP }}:9400" # Elasticsearch diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls index 443ac9a8f..e695c237f 100644 --- a/salt/thehive/init.sls +++ b/salt/thehive/init.sls @@ -102,6 +102,11 @@ so-thehive-es: - 0.0.0.0:9400:9400 - 0.0.0.0:9500:9500 +append_so-thehive-es_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-thehive-es + # Install Cortex so-cortex: docker_container.running: @@ -116,6 +121,11 @@ so-cortex: - port_bindings: - 0.0.0.0:9001:9001 +append_so-cortex_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-cortex + cortexscript: cmd.script: - source: salt://thehive/scripts/cortex_init @@ -136,6 +146,11 @@ so-thehive: - port_bindings: - 0.0.0.0:9000:9000 +append_so-thehive_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-thehive + thehivescript: cmd.script: - source: salt://thehive/scripts/hive_init diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init index 6f5d890ae..beade9c4b 100644 --- a/salt/thehive/scripts/cortex_init +++ b/salt/thehive/scripts/cortex_init @@ -1,5 +1,5 @@ #!/bin/bash -# {%- set MANAGERIP 
= salt['pillar.get']('global:managerip', '') %} +# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %} # {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %} # {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %} # {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %} @@ -17,7 +17,7 @@ cortex_clean(){ cortex_init(){ sleep 60 - CORTEX_IP="{{MANAGERIP}}" + CORTEX_API_URL="{{URLBASE}}/cortex/api" CORTEX_USER="{{CORTEXUSER}}" CORTEX_PASSWORD="{{CORTEXPASSWORD}}" CORTEX_KEY="{{CORTEXKEY}}" @@ -29,31 +29,30 @@ cortex_init(){ # Migrate DB - curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate" + curl -v -k -XPOST -L "https://$CORTEX_API_URL/maintenance/migrate" # Create intial Cortex superadmin - curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}" + curl -v -k -L "https://$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}" # Create user-supplied org - curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}" + curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}" # Create user-supplied org user - curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d 
"{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }" + curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }" # Enable URLScan.io Analyzer - curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}' + curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}' # Enable Cert PassiveDNS Analyzer - curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}' + curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}' # Revoke $CORTEX_USER key - curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https:///$CORTEX_IP/api/user/$CORTEX_USER/key" + curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L 
"https://$CORTEX_API_URL/user/$CORTEX_USER/key" # Update SOCtopus config with apikey value #sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG touch /opt/so/state/cortex.txt - } if [ -f /opt/so/state/cortex.txt ]; then @@ -61,7 +60,7 @@ if [ -f /opt/so/state/cortex.txt ]; then exit 0 else rm -f garbage_file - while ! wget -O garbage_file {{MANAGERIP}}:9500 2>/dev/null + while ! wget -O garbage_file {{URLBASE}}:9500 2>/dev/null do echo "Waiting for Elasticsearch..." rm -f garbage_file diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init index c44af6339..51eefeac8 100755 --- a/salt/thehive/scripts/hive_init +++ b/salt/thehive/scripts/hive_init @@ -1,5 +1,6 @@ #!/bin/bash # {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %} # {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %} # {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %} # {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %} @@ -11,7 +12,8 @@ thehive_clean(){ thehive_init(){ sleep 120 - THEHIVE_IP="{{MANAGERIP}}" + THEHIVE_URL="{{URLBASE}}/thehive" + THEHIVE_API_URL="$THEHIVE_URL/api" THEHIVE_USER="{{THEHIVEUSER}}" THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}" THEHIVE_KEY="{{THEHIVEKEY}}" @@ -21,7 +23,7 @@ thehive_init(){ COUNT=0 THEHIVE_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do - curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive" + curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_URL" if [ $? -eq 0 ]; then THEHIVE_CONNECTED="yes" echo "connected!" 
@@ -36,15 +38,15 @@ thehive_init(){ if [ "$THEHIVE_CONNECTED" == "yes" ]; then # Migrate DB - curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate" + curl -v -k -XPOST -L "https://$THEHIVE_API_URL/maintenance/migrate" # Create intial TheHive user - curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}" + curl -v -k -L "https://$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}" # Pre-load custom fields # # reputation - curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}" + curl -v -k -L "https://$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}" touch /opt/so/state/thehive.txt diff --git a/salt/top.sls b/salt/top.sls index 2b55c4724..bbd2a862d 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -6,6 +6,13 @@ {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %} {% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %} {% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %} +{% set ELASTALERT = 
salt['pillar.get']('elastalert:enabled', True) %} +{% set ELASTICSEARCH = salt['pillar.get']('elasticsearch:enabled', True) %} +{% set FILEBEAT = salt['pillar.get']('filebeat:enabled', True) %} +{% set KIBANA = salt['pillar.get']('kibana:enabled', True) %} +{% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %} +{% set CURATOR = salt['pillar.get']('curator:enabled', True) %} +{% set REDIS = salt['pillar.get']('redis:enabled', True) %} {% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %} {% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} {% import_yaml 'salt/minion.defaults.yaml' as saltversion %} @@ -15,6 +22,7 @@ base: 'not G@saltversion:{{saltversion}}': - match: compound + - salt.minion-state-apply-test {% if ISAIRGAP is sameas true %} - airgap {% endif %} @@ -35,13 +43,14 @@ base: - common - patch.os.schedule - motd + - salt.minion-check + - salt.lasthighstate '*_helix and G@saltversion:{{saltversion}}': - match: compound - salt.master - ca - ssl - - common - registry - telegraf - firewall @@ -51,15 +60,18 @@ base: - suricata - zeek - redis + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} - schedule '*_sensor and G@saltversion:{{saltversion}}': - match: compound - ca - ssl - - common - telegraf - firewall - nginx @@ -85,7 +97,6 @@ base: - salt.master - ca - ssl - - common - registry - manager - nginx @@ -97,14 +108,18 @@ base: - idstools - suricata.manager - healthcheck - {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} + {%- if (FLEETMANAGER or FLEETNODE) or PLAYBOOK != 0 %} - mysql {%- endif %} {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if KIBANA %} - kibana + {%- endif %} - pcap - suricata {%- if ZEEKVER != 'SURICATA' %} @@ -113,9 +128,15 @@ base: {%- if STRELKA %} - strelka {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} + {%- if CURATOR %} - curator + {%- endif %} + {%- if ELASTALERT %} - elastalert 
+ {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet - redis @@ -144,7 +165,6 @@ base: - salt.master - ca - ssl - - common - registry - nginx - telegraf @@ -155,18 +175,30 @@ base: - manager - idstools - suricata.manager - {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} + {%- if (FLEETMANAGER or FLEETNODE) or PLAYBOOK != 0 %} - mysql {%- endif %} {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if REDIS %} - redis + {%- endif %} + {%- if KIBANA %} - kibana + {%- endif %} + {%- if ELASTALERT %} - elastalert + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} - utility - schedule {%- if FLEETMANAGER or FLEETNODE %} @@ -193,7 +225,6 @@ base: - salt.master - ca - ssl - - common - registry - manager - nginx @@ -205,16 +236,24 @@ base: - idstools - suricata.manager - healthcheck - {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} + {%- if (FLEETMANAGER or FLEETNODE) or PLAYBOOK != 0 %} - mysql {%- endif %} {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if REDIS %} - redis + {%- endif %} + {%- if KIBANA %} - kibana + {%- endif %} - pcap - suricata {%- if ZEEKVER != 'SURICATA' %} @@ -223,9 +262,15 @@ base: {%- if STRELKA %} - strelka {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} + {%- if CURATOR %} - curator + {%- endif %} + {%- if ELASTALERT %} - elastalert + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet - fleet.install_package @@ -251,9 +296,10 @@ base: '*_node and I@node:node_type:parser and G@saltversion:{{saltversion}}': - match: compound - - common - firewall + {%- if LOGSTASH %} - logstash + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package {%- endif %} @@ -262,10 +308,13 @@ base: '*_node and I@node:node_type:hot and G@saltversion:{{saltversion}}': - match: compound - - common - firewall + 
{%- if LOGSTASH %} - logstash + {%- endif %} + {%- if CURATOR %} - curator + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package {%- endif %} @@ -274,9 +323,10 @@ base: '*_node and I@node:node_type:warm and G@saltversion:{{saltversion}}': - match: compound - - common - firewall + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package {%- endif %} @@ -287,17 +337,24 @@ base: - match: compound - ca - ssl - - common - nginx - telegraf - firewall {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if CURATOR %} - curator + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package {%- endif %} @@ -306,7 +363,6 @@ base: '*_managersensor and G@saltversion:{{saltversion}}': - match: compound - - common - nginx - telegraf - influxdb @@ -325,7 +381,6 @@ base: - salt.master - ca - ssl - - common - registry - nginx - telegraf @@ -336,19 +391,34 @@ base: - manager - idstools - suricata.manager - {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} + {%- if (FLEETMANAGER or FLEETNODE) or PLAYBOOK != 0 %} - mysql {%- endif %} {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if REDIS %} - redis + {%- endif %} + {%- if CURATOR %} - curator + {%- endif %} + {%- if KIBANA %} - kibana + {%- endif %} + {%- if ELASTALERT %} - elastalert + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} + - utility - schedule {%- if FLEETMANAGER or FLEETNODE %} @@ -374,18 +444,27 @@ base: - match: compound - ca - ssl - - common - nginx - telegraf - firewall {%- if WAZUH != 0 %} - wazuh {%- endif %} + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if LOGSTASH %} - logstash + {%- endif %} + {%- if REDIS %} - redis + {%- endif %} + {%- if 
CURATOR %} - curator + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} {%- if STRELKA %} - strelka {%- endif %} @@ -397,7 +476,9 @@ base: {%- if ZEEKVER != 'SURICATA' %} - zeek {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} - schedule - docker_clean @@ -405,7 +486,6 @@ base: - match: compound - ca - ssl - - common - nginx - telegraf - firewall @@ -421,7 +501,6 @@ base: - salt.master - ca - ssl - - common - registry - manager - nginx @@ -430,9 +509,15 @@ base: - idstools - suricata.manager - pcap + {%- if ELASTICSEARCH %} - elasticsearch + {%- endif %} + {%- if KIBANA %} - kibana + {%- endif %} + {%- if FILEBEAT %} - filebeat + {%- endif %} - utility - suricata - zeek diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index e67ce9f57..6998c7669 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -9,7 +9,7 @@ echo -n "Waiting for ElasticSearch..." COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 30 ]]; do - curl --output /dev/null --silent --head --fail http://{{ ES }}:9200 + curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200 if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" echo "connected!" @@ -29,7 +29,7 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then fi echo "Applying cross cluster search config..." - curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \ + curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \ -H 'Content-Type: application/json' \ -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" @@ -37,6 +37,6 @@ echo "Applying cross cluster search config..." 
{%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} -curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' +curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' {%- endfor %} {%- endif %} diff --git a/salt/utility/bin/eval b/salt/utility/bin/eval index 87692e40f..f63a61942 100644 --- a/salt/utility/bin/eval +++ b/salt/utility/bin/eval @@ -6,7 +6,7 @@ echo -n "Waiting for ElasticSearch..." COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 30 ]]; do - curl --output /dev/null --silent --head --fail http://{{ ES }}:9200 + curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200 if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" echo "connected!" @@ -26,6 +26,6 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then fi echo "Applying cross cluster search config..." 
- curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \ + curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \ -H 'Content-Type: application/json' \ -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent index c6411b492..da4870e47 100755 --- a/salt/wazuh/files/agent/wazuh-register-agent +++ b/salt/wazuh/files/agent/wazuh-register-agent @@ -47,51 +47,54 @@ cat < try to register the agent -sleep 30s -STATUS=$(curl -s -k -u $USER:$PASSWORD $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g') -if [[ $STATUS == "Active" ]]; then +if [ -f /opt/so/conf/wazuh/initial_agent_registration.log ]; then echo "Agent $AGENT_ID already registered!" + exit 0 else + echo "Waiting before registering agent..." + sleep 30s register_agent + cleanup_creds + echo "Initial agent $AGENT_ID with IP $AGENT_IP registered on $DATE." 
> /opt/so/conf/wazuh/initial_agent_registration.log + exit 0 fi #remove_agent diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index f2a4ae05b..19afa48d7 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -65,7 +65,7 @@ wazuhagentconf: file.managed: - name: /var/ossec/etc/ossec.conf - source: salt://wazuh/files/agent/ossec.conf - - user: 0 + - user: root - group: 945 - template: jinja @@ -81,8 +81,8 @@ wazuhagentregister: file.managed: - name: /usr/sbin/wazuh-register-agent - source: salt://wazuh/files/agent/wazuh-register-agent - - user: 0 - - group: 0 + - user: root + - group: root - mode: 755 - template: jinja @@ -91,8 +91,8 @@ wazuhmgrwhitelist: file.managed: - name: /usr/sbin/wazuh-manager-whitelist - source: salt://wazuh/files/wazuh-manager-whitelist - - user: 0 - - group: 0 + - user: root + - group: root - mode: 755 - template: jinja @@ -110,6 +110,11 @@ so-wazuh: - binds: - /nsm/wazuh:/var/ossec/data:rw +append_so-wazuh_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-wazuh + # Register the agent registertheagent: cmd.run: @@ -123,15 +128,15 @@ whitelistmanager: - name: /usr/sbin/wazuh-manager-whitelist - cwd: / +/opt/so/conf/wazuh: + file.symlink: + - target: /nsm/wazuh/etc + wazuhagentservice: service.running: - name: wazuh-agent - enable: True -/opt/so/conf/wazuh: - file.symlink: - - target: /nsm/wazuh/etc - hidsruledir: file.directory: - name: /opt/so/rules/hids diff --git a/salt/yum/etc/yum.conf.jinja b/salt/yum/etc/yum.conf.jinja index 22449083e..bef9c2128 100644 --- a/salt/yum/etc/yum.conf.jinja +++ b/salt/yum/etc/yum.conf.jinja @@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }} bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum distroverpkg=centos-release -{% if salt['pillar.get']('global:managerupdate', '0') %} +{% if (grains['role'] not in 
['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') %} proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142 -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index 712ca53fd..f6edae136 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -196,6 +196,11 @@ so-zeek: - file: /opt/so/conf/zeek/policy - file: /opt/so/conf/zeek/bpf +append_so-zeek_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-zeek + {% else %} zeek_state_not_allowed: diff --git a/salt/zeek/policy/intel/intel.dat b/salt/zeek/policy/intel/intel.dat new file mode 100644 index 000000000..ca10994b6 --- /dev/null +++ b/salt/zeek/policy/intel/intel.dat @@ -0,0 +1,5 @@ +#fields indicator indicator_type meta.source meta.do_notice +# EXAMPLES: +#66.32.119.38 Intel::ADDR Test Address T +#www.honeynet.org Intel::DOMAIN Test Domain T +#4285358dd748ef74cb8161108e11cb73 Intel::FILE_HASH Test MD5 T diff --git a/screenshots/alerts-1.png b/screenshots/alerts-1.png new file mode 100644 index 000000000..140150c77 Binary files /dev/null and b/screenshots/alerts-1.png differ diff --git a/screenshots/hunt-1.png b/screenshots/hunt-1.png new file mode 100644 index 000000000..aa7ae7c1e Binary files /dev/null and b/screenshots/hunt-1.png differ diff --git a/setup/so-common-functions b/setup/so-common-functions index c3df787cc..8bdf09374 100644 --- a/setup/so-common-functions +++ b/setup/so-common-functions @@ -2,6 +2,7 @@ source ./so-variables source ../salt/common/tools/sbin/so-common +source ../salt/common/tools/sbin/so-image-common # Helper functions @@ -18,12 +19,22 @@ filter_unused_nics() { fi # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use) - filtered_nics=$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ 
//g') + filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g') readarray -t filtered_nics <<< "$filtered_nics" nic_list=() for nic in "${filtered_nics[@]}"; do - nic_list+=("$nic" "" "OFF") + case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in + 1) + nic_list+=("$nic" "Link UP " "OFF") + ;; + 0) + nic_list+=("$nic" "Link DOWN " "OFF") + ;; + *) + nic_list+=("$nic" "Link UNKNOWN " "OFF") + ;; + esac done export nic_list diff --git a/setup/so-functions b/setup/so-functions index 3566d0d5e..a55e78e49 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -19,6 +19,8 @@ source ./so-whiptail source ./so-variables source ./so-common-functions +CONTAINER_REGISTRY=quay.io + SOVERSION=$(cat ../VERSION) log() { @@ -105,6 +107,10 @@ add_manager_hostfile() { whiptail_check_exitstatus $exitstatus } +add_mngr_ip_to_hosts() { + echo "$MSRVIP $MSRV" >> /etc/hosts +} + addtotab_generate_templates() { local addtotab_path=$local_salt_dir/pillar/data @@ -173,7 +179,7 @@ add_web_user() { echo "Attempting to add administrator user for web interface..."; echo "$WEBPASSWD1" | /usr/sbin/so-user add "$WEBUSER"; echo "Add user result: $?"; - } >> "$setup_log" 2>&1 + } >> "/root/so-user-add.log" 2>&1 } # Create an secrets pillar so that passwords survive re-install @@ -264,6 +270,7 @@ check_service_status() { check_salt_master_status() { echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 + salt-call saltutil.kill_all_jobs > /dev/null 2>&1 salt-call state.show_top > /dev/null 2>&1 local status=$? 
#true if there is an issue talking to salt master @@ -430,8 +437,6 @@ configure_minion() { { systemctl restart salt-minion; - printf '%s\n' '----'; - cat "$minion_config"; } >> "$setup_log" 2>&1 } @@ -462,15 +467,15 @@ check_requirements() { if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi elif [[ "$standalone_or_dist" == 'import' ]]; then - req_mem=4 - req_cores=2 + req_mem=4 + req_cores=2 req_nics=1 fi if [[ $setup_type == 'network' ]] ; then if [[ -n $nsm_mount ]]; then if [[ "$standalone_or_dist" == 'import' ]]; then - req_storage=50 + req_storage=50 else req_storage=100 fi @@ -482,7 +487,7 @@ check_requirements() { fi else if [[ "$standalone_or_dist" == 'import' ]]; then - req_storage=50 + req_storage=50 else req_storage=200 fi @@ -493,11 +498,20 @@ check_requirements() { fi if [[ $num_nics -lt $req_nics ]]; then - whiptail_requirements_error "NICs" "$num_nics" "$req_nics" + if [[ $num_nics -eq 1 ]]; then + whiptail_requirements_error "NIC" "$num_nics" "$req_nics" + else + whiptail_requirements_error "NICs" "$num_nics" "$req_nics" + fi fi if [[ $num_cpu_cores -lt $req_cores ]]; then - whiptail_requirements_error "cores" "$num_cpu_cores" "$req_cores" + if [[ $num_cpu_cores -eq 1 ]]; then + whiptail_requirements_error "core" "$num_cpu_cores" "$req_cores" + else + whiptail_requirements_error "cores" "$num_cpu_cores" "$req_cores" + fi + fi if [[ $total_mem_hr -lt $req_mem ]]; then @@ -638,6 +652,10 @@ copy_ssh_key() { mkdir -p /root/.ssh ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh + + echo "Removing old entry for manager from known_hosts if it exists" + sed -i "/${MSRV}/d" /root/.ssh/known_hosts + echo "Copying the SSH key to the manager" #Copy the key over to the manager ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV" @@ -691,7 +709,8 @@ detect_os() { exit 1 fi - echo "Installing required packages to run installer..." 
>> "$setup_log" 2>&1 + # Print message to stdout so the user knows setup is doing something + echo "Installing required packages to run installer..." # Install bind-utils so the host command exists if [[ ! $is_iso ]]; then if ! command -v host > /dev/null 2>&1; then @@ -725,6 +744,7 @@ detect_os() { exit 1 fi + # Print message to stdout so the user knows setup is doing something echo "Installing required packages to run installer..." # Install network manager so we can do interface stuff if ! command -v nmcli > /dev/null 2>&1; then @@ -734,7 +754,7 @@ detect_os() { systemctl start NetworkManager; } >> "$setup_log" 2<&1 fi - apt-get install -y bc >> "$setup_log" 2>&1 + apt-get install -y bc curl >> "$setup_log" 2>&1 else echo "We were unable to determine if you are using a supported OS." @@ -747,7 +767,7 @@ detect_os() { disable_auto_start() { - if crontab -l 2>&1 | grep so-setup > /dev/null 2>&1; then + if crontab -l -u $INSTALLUSERNAME 2>&1 | grep so-setup > /dev/null 2>&1; then # Remove the automated setup script from crontab, if it exists logCmd "crontab -u $INSTALLUSERNAME -r" fi @@ -840,96 +860,53 @@ docker_registry() { echo "Setting up Docker Registry" >> "$setup_log" 2>&1 mkdir -p /etc/docker >> "$setup_log" 2>&1 + if [ -z "$DOCKERNET" ]; then + DOCKERNET=172.17.0.0 + fi # Make the host use the manager docker registry + DNETBIP=$(echo $DOCKERNET | awk -F'.' 
'{print $1,$2,$3,1}' OFS='.')/24 if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi printf '%s\n'\ "{"\ - " \"registry-mirrors\": [ \"$proxy:5000\" ]"\ + " \"registry-mirrors\": [ \"$proxy:5000\" ],"\ + " \"bip\": \"$DNETBIP\","\ + " \"default-address-pools\": ["\ + " {"\ + " \"base\" : \"$DOCKERNET\","\ + " \"size\" : 24"\ + " }"\ + " ]"\ "}" > /etc/docker/daemon.json echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1 } +docker_seed_update() { + local name=$1 + local percent_delta=1 + if [ "$install_type" == 'HELIXSENSOR' ]; then + percent_delta=6 + fi + ((docker_seed_update_percent=docker_seed_update_percent+percent_delta)) + + set_progress_str "$docker_seed_update_percent" "Downloading $name" +} + docker_seed_registry() { local VERSION="$SOVERSION" if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then - if [ "$install_type" == 'IMPORT' ]; then - local TRUSTED_CONTAINERS=(\ - "so-idstools:$VERSION" \ - "so-nginx:$VERSION" \ - "so-filebeat:$VERSION" \ - "so-suricata:$VERSION" \ - "so-soc:$VERSION" \ - "so-steno:$VERSION" \ - "so-elasticsearch:$VERSION" \ - "so-kibana:$VERSION" \ - "so-kratos:$VERSION" \ - "so-suricata:$VERSION" \ - "so-pcaptools:$VERSION" \ - "so-zeek:$VERSION" - ) + if [ "$install_type" == 'IMPORT' ]; then + container_list 'so-import' + elif [ "$install_type" == 'HELIXSENSOR' ]; then + container_list 'so-helix' else - local TRUSTED_CONTAINERS=(\ - "so-nginx:$VERSION" \ - "so-filebeat:$VERSION" \ - "so-logstash:$VERSION" \ - "so-idstools:$VERSION" \ - "so-redis:$VERSION" \ - "so-steno:$VERSION" \ - "so-suricata:$VERSION" \ - "so-telegraf:$VERSION" \ - "so-zeek:$VERSION" - ) + container_list fi - if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORT' ]; then - TRUSTED_CONTAINERS=("${TRUSTED_CONTAINERS[@]}" \ - "so-acng:$VERSION" \ - "so-thehive-cortex:$VERSION" \ - "so-curator:$VERSION" \ - "so-domainstats:$VERSION" \ - "so-elastalert:$VERSION" \ - "so-elasticsearch:$VERSION" \ - 
"so-fleet:$VERSION" \ - "so-fleet-launcher:$VERSION" \ - "so-freqserver:$VERSION" \ - "so-grafana:$VERSION" \ - "so-influxdb:$VERSION" \ - "so-kibana:$VERSION" \ - "so-minio:$VERSION" \ - "so-mysql:$VERSION" \ - "so-pcaptools:$VERSION" \ - "so-playbook:$VERSION" \ - "so-soc:$VERSION" \ - "so-kratos:$VERSION" \ - "so-soctopus:$VERSION" \ - "so-steno:$VERSION" \ - "so-strelka-frontend:$VERSION" \ - "so-strelka-manager:$VERSION" \ - "so-strelka-backend:$VERSION" \ - "so-strelka-filestream:$VERSION" \ - "so-thehive:$VERSION" \ - "so-thehive-es:$VERSION" \ - "so-wazuh:$VERSION" - ) - fi - local percent=25 - for i in "${TRUSTED_CONTAINERS[@]}"; do - if [ "$install_type" != 'HELIXSENSOR' ]; then ((percent=percent+1)); else ((percent=percent+6)); fi - # Pull down the trusted docker image - set_progress_str "$percent" "Downloading $i" - { - - if ! docker pull --disable-content-trust=false docker.io/$IMAGEREPO/"$i"; then - sleep 5 - docker pull --disable-content-trust=false docker.io/$IMAGEREPO/"$i" - fi - # Tag it with the new registry destination - docker tag $IMAGEREPO/"$i" "$HOSTNAME":5000/$IMAGEREPO/"$i" - docker push "$HOSTNAME":5000/$IMAGEREPO/"$i" - #docker rmi $IMAGEREPO/"$i" - } >> "$setup_log" 2>&1 - done + + docker_seed_update_percent=25 + + update_docker_containers 'netinstall' '' 'docker_seed_update' "$setup_log" else tar xvf /nsm/docker-registry/docker/registry.tar -C /nsm/docker-registry/docker >> "$setup_log" 2>&1 rm /nsm/docker-registry/docker/registry.tar >> "$setup_log" 2>&1 @@ -956,10 +933,10 @@ firewall_generate_templates() { local firewall_pillar_path=$local_salt_dir/salt/firewall mkdir -p "$firewall_pillar_path" - cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1 + cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1 - for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do - $default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 
127.0.0.1 + for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do + $default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1 done } @@ -1003,15 +980,6 @@ get_redirect() { fi } -got_root() { - # Make sure you are root - uid="$(id -u)" - if [ "$uid" -ne 0 ]; then - echo "This script must be run using sudo!" - exit 1 - fi -} - get_minion_type() { local minion_type case "$install_type" in @@ -1053,6 +1021,10 @@ install_cleanup() { # If Mysql is running stop it /usr/sbin/so-mysql-stop + if [[ $setup_type == 'iso' ]]; then + info "Removing so-setup permission entry from sudoers file" + sed -i '/so-setup/d' /etc/sudoers + fi } import_registry_docker() { @@ -1139,12 +1111,17 @@ manager_global() { fi fi + if [ -z "$DOCKERNET" ]; then + DOCKERNET=172.17.0.0 + fi + # Create a global file for global values printf '%s\n'\ "global:"\ " soversion: '$SOVERSION'"\ " hnmanager: '$HNMANAGER'"\ " ntpserver: '$NTPSERVER'"\ + " dockernet: '$DOCKERNET'"\ " proxy: '$PROXY'"\ " mdengine: '$ZEEKVERSION'"\ " ids: '$NIDS'"\ @@ -1418,14 +1395,74 @@ reserve_group_ids() { groupadd -g 946 cyberchef } +reinstall_init() { + info "Putting system in state to run setup again" + + { + # Kill any salt processes + pkill -9 -ef /usr/bin/salt + + # Remove all salt configs + rm -rf /etc/salt/global /etc/salt/minion /etc/salt/master /etc/salt/pki/* + + if command -v docker &> /dev/null; then + # Stop and remove all so-* containers so files can be changed with more safety + docker stop $(docker ps -a -q --filter "name=so-") + docker rm -f $(docker ps -a -q --filter "name=so-") + fi + + local date_string + date_string=$(date +%s) + + # Backup /opt/so since we'll be rebuilding this directory during setup + backup_dir /opt/so "$date_string" + + # Backup directories in /nsm to prevent app errors + backup_dir /nsm/mysql "$date_string" + backup_dir /nsm/wazuh "$date_string" + + # Remove the old launcher package in case the config changes + 
remove_package launcher-final + + } >> $setup_log 2>&1 +} + +backup_dir() { + dir=$1 + backup_suffix=$2 + + if [[ -d $dir ]]; then + mv "$dir" "${dir}_old_${backup_suffix}" + fi +} + +remove_package() { + local package_name=$1 + if [ $OS = 'centos' ]; then + if rpm -qa | grep -q "$package_name"; then + yum remove -y "$package_name" + fi + else + if dpkg -l | grep -q "$package_name"; then + apt purge -y "$package_name" + fi + fi +} + # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml +# CAUTION! SALT VERSION UDDATES - READ BELOW +# When updating the salt version, also update the version in: +# - securityonion-builds/iso-resources/build.sh +# - securityonion-builds/iso-resources/packages.lst +# - securityonion/salt/salt/master.defaults.yaml +# - securityonion/salt/salt/minion.defaults.yaml saltify() { # Install updates and Salt if [ $OS = 'centos' ]; then set_progress_str 5 'Installing Salt repo' { - sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub; + sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/SALTSTACK-GPG-KEY.pub; cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; } >> "$setup_log" 2>&1 set_progress_str 6 'Installing various dependencies' @@ -1442,14 +1479,14 @@ saltify() { # Download Ubuntu Keys in case manager updates = 1 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 if [[ ! 
$is_airgap ]]; then - logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub" + logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub" logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg" logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH" logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo" fi set_progress_str 7 'Installing salt-master' if [[ ! $is_iso ]]; then - logCmd "yum -y install salt-master-3001.1" + logCmd "yum -y install salt-master-3002.2" fi systemctl enable salt-master >> "$setup_log" 2>&1 ;; @@ -1477,7 +1514,7 @@ saltify() { { if [[ ! $is_iso ]]; then yum -y install epel-release - yum -y install salt-minion-3001.1\ + yum -y install salt-minion-3002.2\ python3\ python36-docker\ python36-dateutil\ @@ -1521,8 +1558,8 @@ saltify() { 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR? 
# Add saltstack repo(s) - wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" + wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" # Add Docker repo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1 @@ -1530,7 +1567,7 @@ saltify() { # Get gpg keys mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 @@ -1543,7 +1580,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - apt-get -y install salt-master=3001.1+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-master=3002.2+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-master >> "$setup_log" 2>&1 ;; *) @@ -1554,26 +1591,25 @@ saltify() { echo "Using apt-key add to add 
SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" ;; esac apt-get update >> "$setup_log" 2>&1 set_progress_str 8 'Installing salt-minion & python modules' - apt-get -y install salt-minion=3001.1+ds-1\ - salt-common=3001.1+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-minion=3002.2+ds-1\ + salt-common=3002.2+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1 if [ "$OSVER" != 'xenial' ]; then - apt-get -y install python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1 + apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1 else - apt-get -y install python-dateutil python-m2crypto python-mysqldb >> "$setup_log" 2>&1 + apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb >> "$setup_log" 2>&1 fi fi } salt_checkin() { - case "$install_type" in 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage { @@ -1643,12 +1679,17 @@ salt_checkin() { done echo " Confirming existence of the CA certificate" - cat /etc/pki/ca.crt + openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates echo " Applyng a mine hack"; salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt; salt "$MINION_ID" mine.update; - echo " Confirming salt mine now contain the certificate"; - salt "$MINION_ID" mine.get 
'*' x509.get_pem_entries; + echo "Confirming salt mine now contains the certificate"; + salt "$MINION_ID" mine.get '*' x509.get_pem_entries | grep -E 'BEGIN CERTIFICATE|END CERTIFICATE'; + if [ $? -eq 0 ]; then + echo "CA in mine" + else + echo "CA not in mine" + fi echo " Applying SSL state"; salt-call state.apply ssl; } >> "$setup_log" 2>&1 @@ -1663,6 +1704,7 @@ salt_checkin() { { salt-call state.apply ca; salt-call state.apply ssl; + salt-call saltutil.sync_modules; } >> "$setup_log" 2>&1 } @@ -1701,10 +1743,12 @@ setup_salt_master_dirs() { if [ "$setup_type" = 'iso' ]; then rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1 rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1 + mkdir -p $local_salt_dir/salt/zeek/policy/intel >> "$setup_log" 2>&1 cp -Rv /home/$INSTALLUSERNAME/SecurityOnion/files/intel.dat $local_salt_dir/salt/zeek/policy/intel/ >> "$setup_log" 2>&1 else cp -Rv ../pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1 cp -Rv ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1 + mkdir -p $local_salt_dir/salt/zeek/policy/intel >> "$setup_log" 2>&1 cp -Rv files/intel.dat $local_salt_dir/salt/zeek/policy/intel/ >> "$setup_log" 2>&1 fi @@ -1798,25 +1842,12 @@ set_default_log_size() { set_hostname() { - set_hostname_iso - - if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then - if ! 
getent hosts "$MSRV"; then - echo "$MSRVIP $MSRV" >> /etc/hosts - fi - fi - -} - -set_hostname_iso() { - hostnamectl set-hostname --static "$HOSTNAME" echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain" > /etc/hosts echo "::1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts echo "$HOSTNAME" > /etc/hostname hostname -F /etc/hostname - } set_initial_firewall_policy() { @@ -1952,7 +1983,7 @@ set_updates() { fi } -set_version() { +mark_version() { # Drop a file with the current version echo "$SOVERSION" > /etc/soversion } diff --git a/setup/so-setup b/setup/so-setup index ab0d809a1..e0c87ace5 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -15,7 +15,15 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +# Make sure you are root before doing anything +uid="$(id -u)" +if [ "$uid" -ne 0 ]; then + echo "This script must be run using sudo!" + exit 1 +fi + cd "$(dirname "$0")" || exit 255 + source ./so-functions source ./so-common-functions source ./so-whiptail @@ -46,6 +54,13 @@ while [[ $# -gt 0 ]]; do esac done +if [[ -f /root/accept_changes ]]; then + is_reinstall=true + + # Move last setup log to backup + mv "$setup_log" "$setup_log.bak" +fi + # Begin Installation pre-processing parse_install_username @@ -106,9 +121,8 @@ case "$setup_type" in esac # Allow execution of SO tools during setup -export PATH=$PATH:../salt/common/tools/sbin - -got_root +local_sbin="$(pwd)/../salt/common/tools/sbin" +export PATH=$PATH:$local_sbin detect_os && detect_cloud set_network_dev_status_list @@ -185,6 +199,10 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then is_helix=true elif [ "$install_type" = 'IMPORT' ]; then is_import=true +elif [ "$install_type" = 'ANALYST' ]; then + cd .. 
|| exit 255 + ./so-analyst-install + exit 0 fi # Say yes to the dress if its an ISO install @@ -299,7 +317,6 @@ if [[ $is_import ]]; then PLAYBOOK=0 fi - # Start user prompts if [[ $is_helix || $is_sensor ]]; then @@ -310,9 +327,8 @@ if [[ $is_helix || $is_sensor || $is_import ]]; then calculate_useable_cores fi -if [[ $is_helix || $is_manager || $is_import ]]; then - whiptail_homenet_manager -fi +whiptail_homenet_manager +whiptail_dockernet_check if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then set_base_heapsizes @@ -415,6 +431,11 @@ if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi whiptail_make_changes # From here on changes will be made. +echo "1" > /root/accept_changes + +if [[ $is_reinstall ]]; then + reinstall_init +fi if [[ -n "$TURBO" ]]; then use_turbo_proxy @@ -422,16 +443,23 @@ fi if [[ "$setup_type" == 'iso' ]]; then # Init networking so rest of install works - set_hostname_iso + set_hostname set_management_interface fi disable_ipv6 disable_auto_start +if [[ "$setup_type" != 'iso' ]]; then + set_hostname +fi + +if [[ $is_minion ]]; then + add_mngr_ip_to_hosts +fi + { - set_hostname; - set_version; + mark_version; clear_manager; } >> $setup_log 2>&1 @@ -567,19 +595,20 @@ fi set_progress_str 25 'Configuring firewall' set_initial_firewall_policy >> $setup_log 2>&1 + # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf + mkdir -p /opt/so/conf/so-status/ >> $setup_log 2>&1 + touch /opt/so/conf/so-status/so-status.conf >> $setup_log 2>&1 + if [[ "$setup_type" == 'iso' ]]; then set_progress_str 26 'Copying containers from iso' else set_progress_str 26 'Downloading containers from the internet' fi - import_registry_docker >> $setup_log 2>&1 + import_registry_docker >> $setup_log 2>&1 salt-call state.apply -l info registry >> $setup_log 2>&1 - docker_seed_registry 2>> "$setup_log" # ~ 60% when finished + docker_seed_registry # ~ 60% when finished set_progress_str 60 
"$(print_salt_state_apply 'manager')" - if [[ "$STRELKARULES" == 1 ]]; then - /usr/sbin/so-yara-update >> $setup_log 2>&1 - fi salt-call state.apply -l info manager >> $setup_log 2>&1 set_progress_str 61 "$(print_salt_state_apply 'idstools')" @@ -615,12 +644,14 @@ fi salt-call state.apply -l info pcap >> $setup_log 2>&1 fi - if [[ $is_sensor || $is_import ]]; then + if [[ $is_sensor || $is_import || $is_helix ]]; then set_progress_str 66 "$(print_salt_state_apply 'suricata')" salt-call state.apply -l info suricata >> $setup_log 2>&1 - set_progress_str 67 "$(print_salt_state_apply 'zeek')" - salt-call state.apply -l info zeek >> $setup_log 2>&1 + if [[ $ZEEKVERSION == 'ZEEK' ]]; then + set_progress_str 67 "$(print_salt_state_apply 'zeek')" + salt-call state.apply -l info zeek >> $setup_log 2>&1 + fi fi if [[ $is_node ]]; then @@ -661,12 +692,9 @@ fi fi if [[ "$OSQUERY" = 1 ]]; then - if [[ "$PLAYBOOK" != 1 ]]; then - set_progress_str 74 "$(print_salt_state_apply 'mysql')" - salt-call state.apply -l info mysql >> $setup_log 2>&1 - fi set_progress_str 75 "$(print_salt_state_apply 'fleet')" + salt-call state.apply fleet.event_enable-fleet # enable fleet in the global pillar salt-call state.apply -l info fleet >> $setup_log 2>&1 set_progress_str 76 "$(print_salt_state_apply 'redis')" @@ -678,24 +706,24 @@ fi salt-call state.apply -l info fleet.event_update-custom-hostname pillar="$pillar_override" >> $setup_log 2>&1 fi - set_progress_str 74 "$(print_salt_state_apply 'so-fleet-setup')" + set_progress_str 78 "$(print_salt_state_apply 'so-fleet-setup')" so-fleet-setup "$FLEETNODEUSER" "$FLEETNODEPASSWD1" >> $setup_log 2>&1 fi if [[ "$WAZUH" = 1 ]]; then - set_progress_str 78 "$(print_salt_state_apply 'wazuh')" + set_progress_str 79 "$(print_salt_state_apply 'wazuh')" salt-call state.apply -l info wazuh >> $setup_log 2>&1 fi if [[ "$THEHIVE" = 1 ]]; then - set_progress_str 79 "$(print_salt_state_apply 'thehive')" + set_progress_str 80 "$(print_salt_state_apply 'thehive')" 
salt-call state.apply -l info thehive >> $setup_log 2>&1 fi if [[ "$STRELKA" = 1 ]]; then if [[ $is_sensor ]]; then - set_progress_str 80 "$(print_salt_state_apply 'strelka')" + set_progress_str 81 "$(print_salt_state_apply 'strelka')" salt-call state.apply -l info strelka >> $setup_log 2>&1 fi if [[ $STRELKARULES == 1 ]]; then @@ -704,15 +732,15 @@ fi fi if [[ $is_manager || $is_helix || $is_import ]]; then - set_progress_str 81 "$(print_salt_state_apply 'utility')" + set_progress_str 82 "$(print_salt_state_apply 'utility')" salt-call state.apply -l info utility >> $setup_log 2>&1 fi if [[ ( $is_helix || $is_manager || $is_node ) && ! $is_eval ]]; then - set_progress_str 82 "$(print_salt_state_apply 'logstash')" + set_progress_str 83 "$(print_salt_state_apply 'logstash')" salt-call state.apply -l info logstash >> $setup_log 2>&1 - set_progress_str 83 "$(print_salt_state_apply 'filebeat')" + set_progress_str 84 "$(print_salt_state_apply 'filebeat')" salt-call state.apply -l info filebeat >> $setup_log 2>&1 fi diff --git a/setup/so-whiptail b/setup/so-whiptail index 302fb96e6..ebc515782 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -61,8 +61,22 @@ whiptail_basic_zeek() { local PROCS=$lb_procs fi - BASICZEEK=$(whiptail --title "Security Onion Setup" --inputbox \ - "Enter the number of zeek processes:" 10 75 "$PROCS" 3>&1 1>&2 2>&3) + ZEEKPINS=$(whiptail --noitem --title "Pin Zeek CPUS" --checklist "Please select $PROCS cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 ) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus + + ZEEKPINS=$(echo "$ZEEKPINS" | tr -d '"') + + IFS=' ' read -ra ZEEKPINS <<< "$ZEEKPINS" +} + +whiptail_zeek_version() { + + [ -n "$TESTING" ] && return + + ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 \ + "ZEEK" "Zeek (formerly known as Bro)" ON \ + "SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3) local exitstatus=$? 
whiptail_check_exitstatus $exitstatus @@ -91,8 +105,10 @@ whiptail_cancel() { echo "/root/installtmp removed"; } >> $setup_log 2>&1 fi - exit + title "User cancelled setup, no changes made." + + exit } whiptail_check_exitstatus() { @@ -244,6 +260,88 @@ whiptail_create_web_user_password2() { } +whiptail_fleet_custom_hostname() { + + [ -n "$TESTING" ] && return + + FLEETCUSTOMHOSTNAME=$(whiptail --title "Security Onion Install" --inputbox \ + "What FQDN should osquery clients use for connections to this Fleet node? Leave blank if the local system hostname will be used." 10 60 3>&1 1>&2 2>&3) + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} + +whiptail_requirements_error() { + + local requirement_needed=$1 + local current_val=$2 + local needed_val=$3 + + [ -n "$TESTING" ] && return + + if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') == 'nics' ]]; then + whiptail --title "Security Onion Setup" \ + --msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press OK to exit setup and reconfigure the machine." 10 75 + + # Same as whiptail_cancel, but changed the wording to exit instead of cancel. + whiptail --title "Security Onion Setup" --msgbox "Exiting Setup. No changes have been made." 8 75 + if [ -d "/root/installtmp" ]; then + { + echo "/root/installtmp exists"; + install_cleanup; + echo "/root/installtmp removed"; + } >> $setup_log 2>&1 + fi + exit + else + whiptail --title "Security Onion Setup" \ + --yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press YES to continue anyway, or press NO to cancel." 10 75 + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus + fi +} + +whiptail_storage_requirements() { + local mount=$1 + local current_val=$2 + local needed_val=$3 + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + Free space on mount point '${mount}' is currently ${current_val}. + + You need ${needed_val} to meet minimum requirements. + + Visit https://docs.securityonion.net/en/2.1/hardware.html for more information. + + Press YES to continue anyway, or press NO to cancel. + EOM + + whiptail \ + --title "Security Onion Setup" \ + --yesno "$message" \ + 14 75 + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} + +whiptail_invalid_pass_warning() { + + [ -n "$TESTING" ] && return + + whiptail --title "Security Onion Setup" --msgbox "Please choose a more secure password." 8 75 +} + +whiptail_invalid_pass_characters_warning() { + + [ -n "$TESTING" ] && return + + whiptail --title "Security Onion Setup" --msgbox "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." 8 75 +} + whiptail_cur_close_days() { [ -n "$TESTING" ] && return @@ -314,6 +412,31 @@ whiptail_dhcp_warn() { } +whiptail_dockernet_check(){ + + [ -n "$TESTING" ] && return + + whiptail --title "Security Onion Setup" --yesno \ + "Do you want to keep the default Docker IP range? \n \n(Choose yes if you don't know what this means)" 10 75 + + local exitstatus=$? + + if [[ $exitstatus == 1 ]]; then + whiptail_dockernet_net + fi +} + +whiptail_dockernet_net() { + + [ -n "$TESTING" ] && return + + DOCKERNET=$(whiptail --title "Security Onion Setup" --inputbox \ + "\nEnter a /24 network range for docker to use: \nThe same range MUST be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3) + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus + +} whiptail_enable_components() { [ -n "$TESTING" ] && return @@ -432,11 +555,12 @@ whiptail_install_type() { # What kind of install are we doing? 
install_type=$(whiptail --title "Security Onion Setup" --radiolist \ - "Choose install type:" 10 65 4 \ + "Choose install type:" 12 65 5 \ "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ "IMPORT" "Standalone to import PCAP or log files " OFF \ + "OTHER" "Other install types" OFF \ 3>&1 1>&2 2>&3 ) @@ -444,19 +568,57 @@ whiptail_install_type() { whiptail_check_exitstatus $exitstatus if [[ $install_type == "DISTRIBUTED" ]]; then + whiptail_install_type_dist + elif [[ $install_type == "OTHER" ]]; then + whiptail_install_type_other + fi + + export install_type +} + +whiptail_install_type_dist() { + + [ -n "$TESTING" ] && return + + install_type=$(whiptail --title "Security Onion Setup" --radiolist \ + "Choose distributed node type:" 13 60 6 \ + "MANAGER" "Start a new grid " ON \ + "SENSOR" "Create a forward only sensor " OFF \ + "SEARCHNODE" "Add a search node with parsing " OFF \ + "MANAGERSEARCH" "Manager + search node " OFF \ + "FLEET" "Dedicated Fleet Osquery Node " OFF \ + "HEAVYNODE" "Sensor + Search Node " OFF \ + 3>&1 1>&2 2>&3 + # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" OFF \ # TODO + # "WARMNODE" "Add Warm Node to existing Hot or Search node" OFF \ # TODO + # "WAZUH" "Stand Alone Wazuh Server" OFF \ # TODO + # "STRELKA" "Stand Alone Strelka Node" OFF \ # TODO + ) + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus + + export install_type +} + +whiptail_install_type_other() { + + [ -n "$TESTING" ] && return + + # so-analyst-install will only work with a working network connection + # so only show it on network installs for now + if [[ $setup_type == 'network' ]]; then install_type=$(whiptail --title "Security Onion Setup" --radiolist \ - "Choose distributed node type:" 13 60 6 \ - "MANAGER" "Start a new grid " ON \ - "SENSOR" "Create a forward only sensor " OFF \ - "SEARCHNODE" "Add a search node with parsing " OFF \ - "MANAGERSEARCH" "Manager + search node " OFF \ - "FLEET" "Dedicated Fleet Osquery Node " OFF \ - "HEAVYNODE" "Sensor + Search Node " OFF \ + "Choose distributed node type:" 9 65 2 \ + "ANALYST" "Quit setup and run so-analyst-install " ON \ + "HELIXSENSOR" "Create a Helix sensor " OFF \ + 3>&1 1>&2 2>&3 + ) + else + install_type=$(whiptail --title "Security Onion Setup" --radiolist \ + "Choose distributed node type:" 8 65 1 \ + "HELIXSENSOR" "Create a Helix sensor " ON \ 3>&1 1>&2 2>&3 - # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" OFF \ # TODO - # "WARMNODE" "Add Warm Node to existing Hot or Search node" OFF \ # TODO - # "WAZUH" "Stand Alone Wazuh Server" OFF \ # TODO - # "STRELKA" "Stand Alone Strelka Node" OFF \ # TODO ) fi @@ -519,6 +681,8 @@ whiptail_management_interface_dns() { MDNS=$(whiptail --title "Security Onion Setup" --inputbox \ "Enter your DNS servers separated by a space:" 10 60 8.8.8.8 8.8.4.4 3>&1 1>&2 2>&3) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus } whiptail_management_interface_dns_search() { @@ -528,6 +692,8 @@ whiptail_management_interface_dns_search() { MSEARCH=$(whiptail --title "Security Onion Setup" --inputbox \ "Enter your DNS search domain:" 10 60 searchdomain.local 3>&1 1>&2 2>&3) + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus } whiptail_management_interface_gateway() { @@ -537,6 +703,8 @@ whiptail_management_interface_gateway() { MGATEWAY=$(whiptail --title "Security Onion Setup" --inputbox \ "Enter your gateway:" 10 60 X.X.X.X 3>&1 1>&2 2>&3) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus } whiptail_management_interface_ip() { @@ -546,6 +714,8 @@ whiptail_management_interface_ip() { MIP=$(whiptail --title "Security Onion Setup" --inputbox \ "Enter your IP address:" 10 60 X.X.X.X 3>&1 1>&2 2>&3) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus } whiptail_management_interface_mask() { @@ -555,6 +725,8 @@ whiptail_management_interface_mask() { MMASK=$(whiptail --title "Security Onion Setup" --inputbox \ "Enter the bit mask for your subnet:" 10 60 24 3>&1 1>&2 2>&3) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus } whiptail_management_nic() { @@ -604,6 +776,8 @@ whiptail_management_server() { if ! getent hosts "$MSRV"; then add_manager_hostfile + else + MSRVIP=$(getent hosts "$MSRV" | awk 'NR==1{print $1}') fi } diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo index 2e1b425fb..856d4d80c 100644 --- a/setup/yum_repos/saltstack.repo +++ b/setup/yum_repos/saltstack.repo @@ -1,6 +1,6 @@ [saltstack] name=SaltStack repo for RHEL/CentOS $releasever PY3 -baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/ +baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/ enabled=1 gpgcheck=1 -gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub \ No newline at end of file +gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/SALTSTACK-GPG-KEY.pub \ No newline at end of file diff --git a/sigs/securityonion-2.3.10.iso.sig b/sigs/securityonion-2.3.10.iso.sig new file mode 100644 index 000000000..f1c9093fd Binary files /dev/null and b/sigs/securityonion-2.3.10.iso.sig differ diff --git 
a/sigs/securityonion-2.3.2.iso.sig b/sigs/securityonion-2.3.2.iso.sig new file mode 100644 index 000000000..53bfe4569 Binary files /dev/null and b/sigs/securityonion-2.3.2.iso.sig differ