diff --git a/files/master b/files/master
index ba107b939..f14c4194c 100644
--- a/files/master
+++ b/files/master
@@ -61,5 +61,3 @@ peer:
reactor:
- 'so/fleet':
- salt://reactor/fleet.sls
- - 'salt/beacon/*/zeek/':
- - salt://reactor/zeek.sls
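Note: this hunk drops the reactor mappings for the 'so/fleet' and 'salt/beacon/*/zeek/' event tags from the master config; the zeek health check now emits its own 'so/healthcheck/zeek' event from the execution module (see salt/_modules/healthcheck.py further down in this diff). A quick sketch for watching what still arrives on the master's event bus, assuming a standard Salt master install:

```bash
# Watch the Salt event bus on the master; healthcheck results now arrive
# under the so/healthcheck/zeek tag rather than through a beacon reactor.
salt-run state.event pretty=True
```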
diff --git a/pillar/healthcheck/eval.sls b/pillar/healthcheck/eval.sls
index fbfa54e45..dd1a027e9 100644
--- a/pillar/healthcheck/eval.sls
+++ b/pillar/healthcheck/eval.sls
@@ -1,5 +1,5 @@
healthcheck:
enabled: False
- schedule: 60
+ schedule: 300
checks:
- zeek
diff --git a/pillar/healthcheck/sensor.sls b/pillar/healthcheck/sensor.sls
index fbfa54e45..dd1a027e9 100644
--- a/pillar/healthcheck/sensor.sls
+++ b/pillar/healthcheck/sensor.sls
@@ -1,5 +1,5 @@
healthcheck:
enabled: False
- schedule: 60
+ schedule: 300
checks:
- zeek
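Note: both the eval and sensor healthcheck pillars raise the schedule value from 60 to 300; combined with the change in salt/healthcheck/init.sls below (minutes: becomes seconds:), the scheduled job now fires every 300 seconds rather than every 60 minutes. A hedged way to confirm this on a minion once the custom module and schedule have been synced:

```bash
# List the schedule entries rendered on this minion; the healthcheck job
# should report an interval of 300 seconds.
salt-call schedule.list
# Run the configured checks (just zeek, per the pillar above) once by hand.
salt-call healthcheck.run
```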
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index 4c295df14..b4e42a8a3 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -2,59 +2,15 @@ logstash:
pipelines:
search:
config:
- - so/1000_preprocess_log_elapsed.conf
- - so/1001_preprocess_syslogng.conf
- - so/1002_preprocess_json.conf
- - so/1004_preprocess_syslog_types.conf
- - so/1026_preprocess_dhcp.conf
- - so/1029_preprocess_esxi.conf
- - so/1030_preprocess_greensql.conf
- - so/1031_preprocess_iis.conf
- - so/1032_preprocess_mcafee.conf
- - so/1033_preprocess_snort.conf
- - so/1034_preprocess_syslog.conf
- - so/2000_network_flow.conf
- - so/6002_syslog.conf
- - so/6101_switch_brocade.conf
- - so/6200_firewall_fortinet.conf
- - so/6201_firewall_pfsense.conf
- - so/6300_windows.conf
- - so/6301_dns_windows.conf
- - so/6400_suricata.conf
- - so/6500_ossec.conf
- - so/6501_ossec_sysmon.conf
- - so/6502_ossec_autoruns.conf
- - so/6600_winlogbeat_sysmon.conf
- - so/6700_winlogbeat.conf
- - so/7100_osquery_wel.conf
- - so/7200_strelka.conf
- - so/8001_postprocess_common_ip_augmentation.conf
- - so/8007_postprocess_http.conf
- - so/8200_postprocess_tagging.conf
- - so/8998_postprocess_log_elapsed.conf
- - so/8999_postprocess_rename_type.conf
- so/0900_input_redis.conf.jinja
- - so/9000_output_bro.conf.jinja
- - so/9001_output_switch.conf.jinja
+ - so/9000_output_zeek.conf.jinja
- so/9002_output_import.conf.jinja
- - so/9004_output_flow.conf.jinja
- - so/9026_output_dhcp.conf.jinja
- - so/9029_output_esxi.conf.jinja
- - so/9030_output_greensql.conf.jinja
- - so/9031_output_iis.conf.jinja
- - so/9032_output_mcafee.conf.jinja
- - so/9033_output_snort.conf.jinja
- - so/9034_output_syslog.conf.jinja
- so/9100_output_osquery.conf.jinja
- - so/9200_output_firewall.conf.jinja
- - so/9300_output_windows.conf.jinja
- - so/9301_output_dns_windows.conf.jinja
- so/9400_output_suricata.conf.jinja
- so/9500_output_beats.conf.jinja
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
- - so/beats-template.json
- - so/logstash-ossec-template.json
- - so/logstash-strelka-template.json
- - so/logstash-template.json
+ - so/so-beats-template.json
+ - so/so-common-template.json
+ - so/so-zeek-template.json
diff --git a/pillar/top.sls b/pillar/top.sls
index 35621b6c2..e2a3a4fdc 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -21,6 +21,7 @@ base:
- static
- firewall.*
- data.*
+ - secrets
- minions.{{ grains.id }}
'*_master':
@@ -32,6 +33,7 @@ base:
- firewall.*
- data.*
- brologs
+ - secrets
- logstash
- logstash.eval
- healthcheck.eval
@@ -61,4 +63,5 @@ base:
- static
- firewall.*
- data.*
+ - secrets
- minions.{{ grains.id }}
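Note: each role in top.sls now also pulls a secrets pillar, which salt/fleet/init.sls further down reads as secrets:mysql, secrets:fleet, and secrets:fleet_jwt in place of the old auth:* keys. A quick check that a minion actually renders the renamed keys, assuming the secrets pillar file exists on the master:

```bash
# Refresh pillar data on the minion and confirm the renamed keys resolve.
salt-call saltutil.refresh_pillar
salt-call pillar.get secrets:fleet_jwt
```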
diff --git a/salt/_beacons/zeek.py b/salt/_beacons/zeek.py
index 0db9d3010..117c2b401 100644
--- a/salt/_beacons/zeek.py
+++ b/salt/_beacons/zeek.py
@@ -5,7 +5,7 @@ def status():
cmd = "runuser -l zeek -c '/opt/zeek/bin/zeekctl status'"
retval = __salt__['docker.run']('so-zeek', cmd)
- logging.debug('zeekctl_module: zeekctl.status retval: %s' % retval)
+ logging.info('zeekctl_module: zeekctl.status retval: %s' % retval)
return retval
@@ -15,11 +15,11 @@ def beacon(config):
retval = []
is_enabled = __salt__['healthcheck.is_enabled']()
- logging.debug('zeek_beacon: healthcheck_is_enabled: %s' % is_enabled)
+ logging.info('zeek_beacon: healthcheck_is_enabled: %s' % is_enabled)
if is_enabled:
zeekstatus = status().lower().split(' ')
- logging.debug('zeek_beacon: zeekctl.status: %s' % str(zeekstatus))
+ logging.info('zeek_beacon: zeekctl.status: %s' % str(zeekstatus))
if 'stopped' in zeekstatus or 'crashed' in zeekstatus or 'error' in zeekstatus or 'error:' in zeekstatus:
zeek_restart = True
else:
diff --git a/salt/_modules/healthcheck.py b/salt/_modules/healthcheck.py
index 2dafa23d3..e5aedff00 100644
--- a/salt/_modules/healthcheck.py
+++ b/salt/_modules/healthcheck.py
@@ -3,7 +3,7 @@
import logging
import sys
-allowed_functions = ['is_enabled,zeek']
+allowed_functions = ['is_enabled', 'zeek']
states_to_apply = []
@@ -65,14 +65,18 @@ def run(checks=''):
return retval
+def send_event(tag, eventdata):
+ __salt__['event.send'](tag, eventdata[0])
+
+
def zeek():
calling_func = sys._getframe().f_back.f_code.co_name
- logging.info('healthcheck_module: zeek function caller: %s' % calling_func)
+ logging.debug('healthcheck_module: zeek function caller: %s' % calling_func)
retval = []
retcode = __salt__['zeekctl.status'](verbose=False)
- logging.info('healthcheck_module: zeekctl.status retcode: %i' % retcode)
+ logging.debug('healthcheck_module: zeekctl.status retcode: %i' % retcode)
if retcode:
zeek_restart = True
if calling_func != 'beacon':
@@ -86,5 +90,6 @@ def zeek():
retval.append({'zeek_restart': zeek_restart})
+ send_event('so/healthcheck/zeek', retval)
__salt__['telegraf.send']('healthcheck zeek_restart=%s' % str(zeek_restart))
return retval
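Note: send_event() publishes the check result on the Salt event bus under the so/healthcheck/zeek tag, alongside the existing telegraf metric. A minimal sketch for firing an equivalent event by hand from a sensor to confirm the master receives it; the payload mirrors the {'zeek_restart': ...} dict built above:

```bash
# Emit a test event with the same tag and payload shape as the healthcheck module.
salt-call event.send 'so/healthcheck/zeek' '{"zeek_restart": false}'
# Then watch for it on the master with: salt-run state.event pretty=True
```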
diff --git a/salt/_modules/zeekctl.py b/salt/_modules/zeekctl.py
index 40f6130e8..ab3cb37d3 100644
--- a/salt/_modules/zeekctl.py
+++ b/salt/_modules/zeekctl.py
@@ -142,7 +142,7 @@ def status(verbose=True):
retval = __salt__['docker.run']('so-zeek', cmd)
if not verbose:
retval = __context__['retcode']
- logging.info('zeekctl_module: zeekctl.status_NOTVERBOSE retval: %s' % retval)
+ logging.info('zeekctl_module: zeekctl.status retval: %s' % retval)
return retval
diff --git a/salt/auth/init.sls b/salt/auth/init.sls
deleted file mode 100644
index abbe514d3..000000000
--- a/salt/auth/init.sls
+++ /dev/null
@@ -1,30 +0,0 @@
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
-{% set MASTER = salt['grains.get']('master') %}
-
-so-auth-api-dir:
- file.directory:
- - name: /opt/so/conf/auth/api
- - user: 939
- - group: 939
- - makedirs: True
-
-so-auth-api:
- docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-auth-api:{{ VERSION }}
- - hostname: so-auth-api
- - name: so-auth-api
- - environment:
- - BASE_PATH: "/so-auth/api"
- - AUTH_TOKEN_TIMEOUT: 32400
- - binds:
- - /opt/so/conf/auth/api:/data
- - port_bindings:
- - 0.0.0.0:5656:5656
-
-so-auth-ui:
- docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-auth-ui:{{ VERSION }}
- - hostname: so-auth-ui
- - name: so-auth-ui
- - port_bindings:
- - 0.0.0.0:4242:80
diff --git a/salt/common/grafana/grafana_dashboards/eval/eval.json b/salt/common/grafana/grafana_dashboards/eval/eval.json
index f012bf3e8..9d00efe2f 100644
--- a/salt/common/grafana/grafana_dashboards/eval/eval.json
+++ b/salt/common/grafana/grafana_dashboards/eval/eval.json
@@ -859,7 +859,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Zeek Restarts",
+ "title": "Zeek Restarts via Healthcheck",
"tooltip": {
"shared": true,
"sort": 0,
diff --git a/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json b/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
index 914abcb6b..9663dfd79 100644
--- a/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
+++ b/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
@@ -2256,7 +2256,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Zeek Restarts",
+ "title": "Zeek Restarts via Healthcheck",
"tooltip": {
"shared": true,
"sort": 0,
diff --git a/salt/common/nginx/index.html b/salt/common/nginx/index.html
deleted file mode 100644
index e501e5377..000000000
--- a/salt/common/nginx/index.html
+++ /dev/null
@@ -1,163 +0,0 @@
-
-
-
-Security Onion - Hybrid Hunter
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Hybrid Hunter Alpha 1.1.4 - Feature Parity Release
-
-
-
-Changes:
-
- - Added new in-house auth method [Security Onion Auth](https://github.com/Security-Onion-Solutions/securityonion-auth).
- - Web user creation is done via the browser now instead of so-user-add.
- - New Logstash pipeline setup. Now uses multiple pipelines.
- - New Master + Search node type as well as a Heavy Node type in the install.
- - Change all nodes to point to the docker registry on the Master. This cuts down on the calls to dockerhub.
- - Zeek 3.0.1
- - Elastic 6.8.6
- - New SO Start | Stop | Restart scripts for all components (eg. `so-playbook-restart`).
- - BPF support for Suricata (NIDS), Steno (PCAP) & Zeek ([Docs](https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/BPF)).
- - Updated Domain Stats & Frequency Server containers to Python3 & created new Salt states for them.
- - Added so-status script which gives an easy to read look at container status.
- - Manage threshold.conf for Suricata using the thresholding pillar.
- - The ISO now includes all the docker containers for faster install speeds.
- - You now set the password for the onion account during the iso install. This account is temporary and will be removed after so-setup.
- - Updated Helix parsers for better compatibility.
- - Updated telegraf docker to include curl and jq.
- - CVE-2020-0601 Zeek Detection Script.
- - ISO Install now prompts you to create a password for the onion user during imaging. This account gets disabled during setup.
- - Check out the Hybrid Hunter Quick Start Guide.
-
-
-
-
-
-
-
-
diff --git a/salt/common/nginx/nginx.conf.so-eval b/salt/common/nginx/nginx.conf.so-eval
index 89e6fe46b..19ddf0c5f 100644
--- a/salt/common/nginx/nginx.conf.so-eval
+++ b/salt/common/nginx/nginx.conf.so-eval
@@ -27,6 +27,7 @@ http {
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
+ client_max_body_size 1024M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
@@ -142,6 +143,18 @@ http {
proxy_set_header Proxy "";
}
+
+ location /packages/ {
+ try_files $uri =206;
+ auth_request /auth/sessions/whoami;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+ }
+
location /grafana/ {
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/;
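Note: the new /packages/ location serves files straight from disk via try_files but still gates access behind the Kratos session check (auth_request), and client_max_body_size 1024M allows large uploads through the proxy; the same block is added to the so-master and so-mastersearch configs below. A rough smoke test, where the hostname and package name are placeholders and cookies.txt holds a valid browser session:

```bash
# Without a session the auth_request subrequest should reject the call;
# with one, the file (or the =206 fallback) should come back.
curl -sk -o /dev/null -w '%{http_code}\n' https://master.example.com/packages/example.pkg
curl -sk -b cookies.txt -O https://master.example.com/packages/example.pkg
```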
diff --git a/salt/common/nginx/nginx.conf.so-fleet b/salt/common/nginx/nginx.conf.so-fleet
index dd4b22d9b..a97b85e78 100644
--- a/salt/common/nginx/nginx.conf.so-fleet
+++ b/salt/common/nginx/nginx.conf.so-fleet
@@ -86,14 +86,6 @@ http {
}
-
- error_page 401 = @error401;
-
- location @error401 {
- add_header Set-Cookie "NSREDIRECT=http://{{ MAINIP }}$request_uri;Domain={{ MAINIP }};Path=/;Max-Age=60000";
- return 302 http://{{ MAINIP }}/so-auth/loginpage/;
- }
-
error_page 404 /404.html;
location = /40x.html {
}
diff --git a/salt/common/nginx/nginx.conf.so-master b/salt/common/nginx/nginx.conf.so-master
index 89e6fe46b..19ddf0c5f 100644
--- a/salt/common/nginx/nginx.conf.so-master
+++ b/salt/common/nginx/nginx.conf.so-master
@@ -27,6 +27,7 @@ http {
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
+ client_max_body_size 1024M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
@@ -142,6 +143,18 @@ http {
proxy_set_header Proxy "";
}
+
+ location /packages/ {
+ try_files $uri =206;
+ auth_request /auth/sessions/whoami;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+ }
+
location /grafana/ {
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/;
diff --git a/salt/common/nginx/nginx.conf.so-mastersearch b/salt/common/nginx/nginx.conf.so-mastersearch
index 89e6fe46b..19ddf0c5f 100644
--- a/salt/common/nginx/nginx.conf.so-mastersearch
+++ b/salt/common/nginx/nginx.conf.so-mastersearch
@@ -27,6 +27,7 @@ http {
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
+ client_max_body_size 1024M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
@@ -142,6 +143,18 @@ http {
proxy_set_header Proxy "";
}
+
+ location /packages/ {
+ try_files $uri =206;
+ auth_request /auth/sessions/whoami;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+ }
+
location /grafana/ {
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/;
diff --git a/salt/common/tools/sbin/so-auth-restart b/salt/common/tools/sbin/so-auth-restart
deleted file mode 100755
index 8659b1e3a..000000000
--- a/salt/common/tools/sbin/so-auth-restart
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-restart auth $1
-
diff --git a/salt/common/tools/sbin/so-auth-start b/salt/common/tools/sbin/so-auth-start
deleted file mode 100755
index 5330f662d..000000000
--- a/salt/common/tools/sbin/so-auth-start
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-start auth $1
diff --git a/salt/common/tools/sbin/so-auth-stop b/salt/common/tools/sbin/so-auth-stop
deleted file mode 100755
index 5ca6db7e2..000000000
--- a/salt/common/tools/sbin/so-auth-stop
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-stop auth $1
diff --git a/salt/common/tools/sbin/so-elastic-download b/salt/common/tools/sbin/so-elastic-download
index 020a42f79..9e2c56719 100755
--- a/salt/common/tools/sbin/so-elastic-download
+++ b/salt/common/tools/sbin/so-elastic-download
@@ -2,8 +2,6 @@
MASTER=MASTER
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
-"so-auth-api:$VERSION" \
-"so-auth-ui:$VERSION" \
"so-core:$VERSION" \
"so-thehive-cortex:$VERSION" \
"so-curator:$VERSION" \
diff --git a/salt/common/tools/sbin/so-restart b/salt/common/tools/sbin/so-restart
index 2e3c0a00c..bbcfe4c20 100755
--- a/salt/common/tools/sbin/so-restart
+++ b/salt/common/tools/sbin/so-restart
@@ -32,6 +32,5 @@ fi
case $1 in
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
- "auth") docker stop so-auth-api; docker stop so-auth-ui; salt-call state.apply auth queue=True;;
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
esac
diff --git a/salt/common/tools/sbin/so-start b/salt/common/tools/sbin/so-start
index 889160122..a198377a1 100755
--- a/salt/common/tools/sbin/so-start
+++ b/salt/common/tools/sbin/so-start
@@ -32,16 +32,5 @@ fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
- "auth")
- if docker ps | grep -q so-auth-api; then
- if docker ps | grep -q so-auth-ui; then
- printf "\n$1 is already running!\n\n"
- else
- docker rm so-auth-api >/dev/null 2>&1; docker rm so-auth-ui >/dev/null 2>&1; salt-call state.apply $1 queue=True
- fi
- else
- docker rm so-auth-api >/dev/null 2>&1; docker rm so-auth-ui >/dev/null 2>&1; salt-call state.apply $1 queue=True
- fi
- ;;
*) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
diff --git a/salt/common/tools/sbin/so-stop b/salt/common/tools/sbin/so-stop
index 8d5770b64..4a0dd944b 100755
--- a/salt/common/tools/sbin/so-stop
+++ b/salt/common/tools/sbin/so-stop
@@ -24,7 +24,6 @@ printf "Stopping $1...\n"
echo $banner
case $1 in
- "auth") docker stop so-auth-api; docker rm so-auth-api; docker stop so-auth-ui; docker rm so-auth-ui ;;
*) docker stop so-$1 ; docker rm so-$1 ;;
esac
diff --git a/salt/common/tools/sbin/so-user b/salt/common/tools/sbin/so-user
index c7fd19a4c..357614efb 100755
--- a/salt/common/tools/sbin/so-user
+++ b/salt/common/tools/sbin/so-user
@@ -22,8 +22,17 @@ got_root() {
got_root
if [[ $# < 1 || $# > 2 ]]; then
- echo "Usage: $0 [email]"
- echo "Note that checkpw only checks that the given password meets the minimum requirements, it does not test that it matches for an existing user."
+ echo "Usage: $0 [email]"
+ echo ""
+ echo " list: Lists all user email addresses currently defined in the identity system"
+ echo " add: Adds a new user to the identity system; requires 'email' parameter"
+ echo " update: Updates a user's password; requires 'email' parameter"
+ echo " delete: Deletes an existing user; requires 'email' parameter"
+ echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
+ echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
+ echo " valpass: Validates that a password is acceptable for defining a new user"
+ echo ""
+ echo " Note that the password can be piped into stdin to avoid prompting for it."
exit 1
fi
@@ -50,14 +59,16 @@ function require() {
}
# Verify this environment is capable of running this script
-require "argon2"
-require "jq"
-require "curl"
-require "openssl"
-require "sqlite3"
-[[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
-response=$(curl -Ss ${kratosUrl}/)
-[[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
+function verifyEnvironment() {
+ require "argon2"
+ require "jq"
+ require "curl"
+ require "openssl"
+ require "sqlite3"
+ [[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
+ response=$(curl -Ss ${kratosUrl}/)
+ [[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
+}
function findIdByEmail() {
email=$1
@@ -77,6 +88,15 @@ function validatePassword() {
fi
}
+function validateEmail() {
+ email=$1
+ # (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
+ if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
+ echo "Email address is invalid"
+ exit 3
+ fi
+}
+
function updatePassword() {
identityId=$1
@@ -164,17 +184,21 @@ function deleteUser() {
case "${operation}" in
"add")
+ verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
+ validateEmail "$email"
createUser "$email"
echo "Successfully added new user"
;;
"list")
+ verifyEnvironment
listUsers
;;
"update")
+ verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
updateUser "$email"
@@ -182,13 +206,25 @@ case "${operation}" in
;;
"delete")
+ verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
deleteUser "$email"
echo "Successfully deleted user"
;;
- "checkpw")
+ "validate")
+ validateEmail "$email"
+ updatePassword
+ echo "Email and password are acceptable"
+ ;;
+
+ "valemail")
+ validateEmail "$email"
+ echo "Email is acceptable"
+ ;;
+
+ "valpass")
updatePassword
echo "Password is acceptable"
;;
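Note: the reworked help text documents the new operations and points out that the password can be supplied on stdin, which keeps it out of shell history and makes the validators easy to script. A usage sketch with placeholder values, assuming so-user is on the master's PATH:

```bash
# Validate a candidate password non-interactively.
echo 'CorrectHorseBatteryStaple1!' | so-user valpass
# Add a user, piping the password instead of being prompted for it.
echo 'CorrectHorseBatteryStaple1!' | so-user add analyst@example.com
```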
diff --git a/salt/elasticsearch/files/ingest/suricata.common b/salt/elasticsearch/files/ingest/suricata.common
index c5009f31e..4a1f293b2 100644
--- a/salt/elasticsearch/files/ingest/suricata.common
+++ b/salt/elasticsearch/files/ingest/suricata.common
@@ -8,6 +8,7 @@
{ "rename":{ "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
{ "rename":{ "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
+ { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ "remove": { "field": ["agent"], "ignore_failure": true } },
{ "pipeline": { "name": "common" } }
]
diff --git a/salt/elasticsearch/files/ingest/zeek.common b/salt/elasticsearch/files/ingest/zeek.common
index 88949353c..fe1e50fe1 100644
--- a/salt/elasticsearch/files/ingest/zeek.common
+++ b/salt/elasticsearch/files/ingest/zeek.common
@@ -15,6 +15,7 @@
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
{ "set": { "field": "server.port", "value": "{{destination.port}}" } },
+ { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ "date": { "field": "message2.ts", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "ignore_failure": true } },
{ "remove": { "field": ["agent"], "ignore_failure": true } },
{ "pipeline": { "name": "common" } }
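Note: both the Suricata and Zeek ingest pipelines now copy agent.name into observer.name before the agent field is removed, so each event stays attributable to the sensor that shipped it. A hedged check with Elasticsearch's simulate API, assuming the pipeline is registered under the file name zeek.common and the common pipeline it chains to is also loaded:

```bash
# Run one fake document through the pipeline and confirm observer.name is set.
curl -s -XPOST 'http://localhost:9200/_ingest/pipeline/zeek.common/_simulate' \
  -H 'Content-Type: application/json' \
  -d '{ "docs": [ { "_source": { "agent": { "name": "sensor01" } } } ] }'
```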
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 3be56233e..50dab6d2f 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -179,7 +179,8 @@ filebeat.inputs:
close_removed: false
{%- endif %}
-#----------------------------- Logstash output ---------------------------------
+#----------------------------- Elasticsearch/Logstash output ---------------------------------
+{%- if grains['role'] == "so-eval" %}
output.elasticsearch:
enabled: true
hosts: ["{{ MASTER }}:9200"]
@@ -202,12 +203,15 @@ output.elasticsearch:
when.contains:
module: "strelka"
-#output.logstash:
+setup.template.enabled: false
+{%- else %}
+
+output.logstash:
# Boolean flag to enable or disable the output module.
- #enabled: true
+ enabled: true
# The Logstash hosts
- #hosts: ["{{ MASTER }}:5644"]
+ hosts: ["{{ MASTER }}:5644"]
# Number of workers per Logstash host.
#worker: 1
@@ -222,21 +226,21 @@ output.elasticsearch:
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
- #ssl.verification_mode: full
+ ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
- #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+ ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
- #ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
+ ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
# Certificate for SSL client authentication
- #ssl.certificate: "/usr/share/filebeat/filebeat.crt"
+ ssl.certificate: "/usr/share/filebeat/filebeat.crt"
# Client Certificate Key
- #ssl.key: "/usr/share/filebeat/filebeat.key"
+ ssl.key: "/usr/share/filebeat/filebeat.key"
setup.template.enabled: false
# A dictionary of settings to place into the settings.index dictionary
@@ -251,7 +255,7 @@ setup.template.enabled: false
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
#_source:
#enabled: false
-
+{%- endif %}
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
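Note: on so-eval nodes Filebeat keeps writing directly to Elasticsearch, while every other role now ships to Logstash on port 5644 with mutual TLS using the certificate paths above. A hedged way to confirm the rendered output actually connects, assuming the container is named so-filebeat and the config sits alongside those certificates:

```bash
# Test the configured output (Elasticsearch on eval, Logstash+TLS elsewhere).
docker exec -it so-filebeat filebeat test output -c /usr/share/filebeat/filebeat.yml
```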
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
index f6e32d0d5..4f1aa0348 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
@@ -239,9 +239,10 @@ spec:
query: chrome_extensions
- description: Disk encryption status and information.
interval: 3600
- name: disk_encryption
+ name: disk_encryption_snapshot
platform: darwin
- query: disk_encryption
+ query: disk_encryption_snapshot
+ snapshot: true
- description: Local system users.
interval: 28800
name: users_snapshot
@@ -282,6 +283,12 @@ spec:
name: sip_config
platform: darwin
query: sip_config
+ - description: Shows information about the wifi network that a host is currently connected to.
+ interval: 28800
+ name: wifi_status_snapshot
+ platform: darwin
+ query: wifi_status_snapshot
+ snapshot: true
- description: Returns the private keys in the users ~/.ssh directory and whether
or not they are encrypted.
interval: 3600
@@ -290,7 +297,8 @@ spec:
query: user_ssh_keys
removed: false
targets:
- labels: null
+ labels:
+ - macOS
---
apiVersion: v1
kind: query
@@ -365,13 +373,10 @@ spec:
description: Returns information about installed event taps. Can be used to detect
keyloggers
name: event_taps
- query: SELECT * FROM event_taps INNER JOIN processes ON event_taps.tapping_process
- = processes.pid WHERE event_tapped NOT LIKE '%mouse%' AND processes.path NOT LIKE
- '%.app%' AND processes.path!='/Library/Application Support/org.pqrs/Karabiner-Elements/bin/karabiner_grabber'
- AND processes.path NOT LIKE '/Users/%/bin/kwm' AND processes.path!='/Library/Rapport/bin/rooksd'
- AND processes.path!='/usr/sbin/universalaccessd' AND processes.path NOT LIKE '/usr/local/Cellar/%'
- AND processes.path NOT LIKE '/System/Library/%' AND processes.path NOT LIKE '%/steamapps/%'
- AND event_taps.enabled=1;
+ query: SELECT * FROM event_taps INNER JOIN processes ON event_taps.tapping_process = processes.pid
+ WHERE event_tapped NOT LIKE '%mouse%' AND processes.path NOT IN ('/usr/libexec/airportd',
+ '/usr/sbin/universalaccessd') AND processes.path NOT LIKE '/System/Library/%' AND processes.path
+ NOT LIKE '%/steamapps/%' AND processes.path NOT LIKE '%.app%' AND event_taps.enabled=1;
---
apiVersion: v1
kind: query
@@ -455,6 +460,13 @@ spec:
---
apiVersion: v1
kind: query
+spec:
+ description: Shows information about the wifi network that a host is currently connected to.
+ name: wifi_status_snapshot
+ query: SELECT * FROM wifi_status;
+---
+apiVersion: v1
+kind: query
spec:
description: Snapshot query for macosx_kextstat
name: macosx_kextstat_snapshot
@@ -479,7 +491,7 @@ kind: query
spec:
description: Safari browser extension details for all users.
name: safari_extensions
- query: SELECT * FROM users JOIN safari_extensions USING (uid);
+ query: SELECT * FROM users CROSS JOIN safari_extensions USING (uid);
---
apiVersion: v1
kind: query
@@ -500,7 +512,7 @@ kind: query
spec:
description: List authorized_keys for each user on the system
name: authorized_keys
- query: SELECT * FROM users JOIN authorized_keys USING (uid);
+ query: SELECT * FROM users CROSS JOIN authorized_keys USING (uid);
---
apiVersion: v1
kind: query
@@ -508,7 +520,7 @@ spec:
description: Application, System, and Mobile App crash logs.
name: crashes
query: SELECT uid, datetime, responsible, exception_type, identifier, version, crash_path
- FROM users JOIN crashes USING (uid);
+ FROM users CROSS JOIN crashes USING (uid);
---
apiVersion: v1
kind: query
@@ -516,7 +528,7 @@ spec:
description: Displays the percentage of free space available on the primary disk
partition
name: disk_free_space_pct
- query: SELECT (blocks_available * 100 / blocks) AS pct FROM mounts WHERE device='/dev/disk1';
+ query: SELECT (blocks_available * 100 / blocks) AS pct FROM mounts WHERE device='/dev/disk1s1';
---
apiVersion: v1
kind: query
@@ -553,7 +565,7 @@ kind: query
spec:
description: Snapshot query for Chrome extensions
name: chrome_extensions_snapshot
- query: SELECT * FROM users JOIN chrome_extensions USING (uid);
+ query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
---
apiVersion: v1
kind: query
@@ -589,14 +601,14 @@ kind: query
spec:
description: All C/NPAPI browser plugin details for all users.
name: browser_plugins
- query: SELECT * FROM users JOIN browser_plugins USING (uid);
+ query: SELECT * FROM users CROSS JOIN browser_plugins USING (uid);
---
apiVersion: v1
kind: query
spec:
description: List installed Firefox addons for all users
name: firefox_addons
- query: SELECT * FROM users JOIN firefox_addons USING (uid);
+ query: SELECT * FROM users CROSS JOIN firefox_addons USING (uid);
---
apiVersion: v1
kind: query
@@ -625,13 +637,13 @@ kind: query
spec:
description: List installed Chrome Extensions for all users
name: chrome_extensions
- query: SELECT * FROM users JOIN chrome_extensions USING (uid);
+ query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
---
apiVersion: v1
kind: query
spec:
description: Disk encryption status and information.
- name: disk_encryption
+ name: disk_encryption_snapshot
query: SELECT * FROM disk_encryption;
---
apiVersion: v1
@@ -691,4 +703,4 @@ spec:
description: Returns the private keys in the users ~/.ssh directory and whether
or not they are encrypted.
name: user_ssh_keys
- query: SELECT * FROM users JOIN user_ssh_keys USING (uid);
+ query: SELECT * FROM users CROSS JOIN user_ssh_keys USING (uid);
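Note: the users JOIN <table> USING (uid) queries are rewritten as CROSS JOIN because tables such as user_ssh_keys, chrome_extensions, and authorized_keys need the uid constraint pushed in from users; CROSS JOIN keeps SQLite from reordering the join and scanning the user-scoped table first with no uid, which returns empty results. This matches upstream osquery guidance for user-joined tables and can be checked locally:

```bash
# The CROSS JOIN form reliably returns one row per user/key pair because
# each uid from users is passed into user_ssh_keys as a constraint.
osqueryi --json "SELECT * FROM users CROSS JOIN user_ssh_keys USING (uid);"
```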
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/Windows/osquery.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
index 5e6ea4168..3aa9da280 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
@@ -227,8 +227,35 @@ spec:
platform: windows
query: scheduled_tasks_snapshot
snapshot: true
+ - description: Appcompat shims (.sdb files) installed on Windows hosts.
+ interval: 3600
+ name: appcompat_shims
+ platform: windows
+ query: appcompat_shims
+ - description: Disk encryption status and information snapshot query.
+ interval: 28800
+ name: bitlocker_info_snapshot
+ platform: windows
+ query: bitlocker_info_snapshot
+ snapshot: true
targets:
- labels: null
+ labels:
+ - MS Windows
+---
+apiVersion: v1
+kind: query
+spec:
+ description: Appcompat shims (.sdb files) installed on Windows hosts.
+ name: appcompat_shims
+ query: SELECT * FROM appcompat_shims WHERE description!='EMET_Database' AND
+ executable NOT IN ('setuphost.exe','setupprep.exe','iisexpress.exe');
+---
+apiVersion: v1
+kind: query
+spec:
+ description: Disk encryption status and information snapshot query.
+ name: bitlocker_info_snapshot
+ query: SELECT * FROM bitlocker_info;
---
apiVersion: v1
kind: query
@@ -302,7 +329,7 @@ kind: query
spec:
description: Snapshot query for Chrome extensions
name: chrome_extensions_snapshot
- query: SELECT * FROM users JOIN chrome_extensions USING (uid);
+ query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
---
apiVersion: v1
kind: query
@@ -466,7 +493,7 @@ kind: query
spec:
description: List installed Chrome Extensions for all users
name: chrome_extensions
- query: SELECT * FROM users JOIN chrome_extensions USING (uid);
+ query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/options.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/options.yaml
index dd53bae81..f2bb85d8c 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/options.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/options.yaml
@@ -3,17 +3,9 @@ kind: options
spec:
config:
decorators:
- always:
+ load:
+ - SELECT uuid AS host_uuid FROM system_info;
- SELECT hostname AS hostname FROM system_info;
- - SELECT codename FROM os_version;
- - SELECT uuid AS LiveQuery FROM system_info;
- - SELECT address AS EndpointIP1 FROM interface_addresses where address not
- like '%:%' and address not like '127%' and address not like '169%' order by
- interface desc limit 1;
- - SELECT address AS EndpointIP2 FROM interface_addresses where address not
- like '%:%' and address not like '127%' and address not like '169%' order by
- interface asc limit 1;
- - SELECT hardware_serial FROM system_info;
file_paths:
binaries:
- /usr/bin/%%
@@ -29,7 +21,6 @@ spec:
efi:
- /System/Library/CoreServices/boot.efi
options:
- decorations_top_level: true
disable_distributed: false
disable_tables: windows_events
distributed_interval: 10
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/performance-metrics.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/performance-metrics.yaml
index 25c6a5fbd..e8116bbb1 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/performance-metrics.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/performance-metrics.yaml
@@ -26,7 +26,9 @@ spec:
query: backup_tool_perf
snapshot: true
targets:
- labels: null
+ labels:
+ - MS Windows
+ - macOS
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/security-tooling-checks.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/security-tooling-checks.yaml
index a65f59dd4..79172d46a 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/security-tooling-checks.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/security-tooling-checks.yaml
@@ -26,7 +26,9 @@ spec:
platform: windows
query: endpoint_security_tool_backend_server_registry_misconfigured
targets:
- labels: null
+ labels:
+ - MS Windows
+ - macOS
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-application-security.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-application-security.yaml
index 43c034a52..d1008e3cd 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-application-security.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-application-security.yaml
@@ -40,7 +40,8 @@ spec:
platform: windows
query: uac_settings_registry
targets:
- labels: null
+ labels:
+ - MS Windows
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-compliance.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-compliance.yaml
index eef5f3fcc..38ff4857e 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-compliance.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-compliance.yaml
@@ -137,7 +137,8 @@ spec:
platform: windows
query: send_error_alert_registry
targets:
- labels: null
+ labels:
+ - MS Windows
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-registry-monitoring.yaml b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-registry-monitoring.yaml
index 123ec1a6e..89f01494b 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-registry-monitoring.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Endpoints/packs/windows-registry-monitoring.yaml
@@ -185,7 +185,8 @@ spec:
platform: windows
query: send_error_alert_registry_exists
targets:
- labels: null
+ labels:
+ - MS Windows
---
apiVersion: v1
kind: query
diff --git a/salt/fleet/files/packs/palantir/Fleet/Servers/Linux/osquery.yaml b/salt/fleet/files/packs/palantir/Fleet/Servers/Linux/osquery.yaml
index 5f98fa75b..62ae6d458 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Servers/Linux/osquery.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Servers/Linux/osquery.yaml
@@ -5,254 +5,251 @@ spec:
name: LinuxPack
queries:
- description: Retrieves all the jobs scheduled in crontab in the target system.
- interval: 0
+ interval: 86400
name: crontab_snapshot
platform: linux
query: crontab_snapshot
snapshot: true
- description: Various Linux kernel integrity checked attributes.
- interval: 0
+ interval: 86400
name: kernel_integrity
platform: linux
query: kernel_integrity
- description: Linux kernel modules both loaded and within the load search path.
- interval: 0
+ interval: 3600
name: kernel_modules
platform: linux
query: kernel_modules
- description: Retrieves the current list of mounted drives in the target system.
- interval: 0
+ interval: 86400
name: mounts
platform: linux
query: mounts
- - description: The percentage of total CPU time (system+user) consumed by osqueryd
- interval: 0
- name: osquery_cpu_pct
- platform: linux
- query: osquery_cpu_pct
- snapshot: true
- description: Socket events collected from the audit framework
- interval: 0
+ interval: 10
name: socket_events
platform: linux
query: socket_events
- description: Record the network interfaces and their associated IP and MAC addresses
- interval: 0
+ interval: 600
name: network_interfaces_snapshot
platform: linux
query: network_interfaces_snapshot
snapshot: true
- version: 1.4.5
- description: Information about the running osquery configuration
- interval: 0
+ interval: 86400
name: osquery_info
platform: linux
query: osquery_info
snapshot: true
- description: Display all installed RPM packages
- interval: 0
+ interval: 86400
name: rpm_packages
platform: centos
query: rpm_packages
snapshot: true
- description: Record shell history for all users on system (instead of just root)
- interval: 0
+ interval: 3600
name: shell_history
platform: linux
query: shell_history
- description: File events collected from file integrity monitoring
- interval: 0
+ interval: 10
name: file_events
platform: linux
query: file_events
removed: false
- description: Retrieve the EC2 metadata for this endpoint
- interval: 0
+ interval: 3600
name: ec2_instance_metadata
platform: linux
query: ec2_instance_metadata
- description: Retrieve the EC2 tags for this endpoint
- interval: 0
+ interval: 3600
name: ec2_instance_tags
platform: linux
query: ec2_instance_tags
- description: Snapshot query to retrieve the EC2 tags for this instance
- interval: 0
+ interval: 86400
name: ec2_instance_tags_snapshot
platform: linux
query: ec2_instance_tags_snapshot
snapshot: true
- description: Retrieves the current filters and chains per filter in the target
system.
- interval: 0
+ interval: 86400
name: iptables
platform: linux
query: iptables
- description: Display any SUID binaries that are owned by root
- interval: 0
+ interval: 86400
name: suid_bin
platform: linux
query: suid_bin
- description: Display all installed DEB packages
- interval: 0
+ interval: 86400
name: deb_packages
platform: ubuntu
query: deb_packages
snapshot: true
- description: Find shell processes that have open sockets
- interval: 0
+ interval: 600
name: behavioral_reverse_shell
platform: linux
query: behavioral_reverse_shell
- description: Retrieves all the jobs scheduled in crontab in the target system.
- interval: 0
+ interval: 3600
name: crontab
platform: linux
query: crontab
- - description: Records the system resources used by each query
- interval: 0
- name: per_query_perf
- platform: linux
- query: per_query_perf
- - description: Records avg rate of socket events since daemon started
- interval: 0
- name: socket_rates
- platform: linux
- query: socket_rates
- snapshot: true
- description: Local system users.
- interval: 0
+ interval: 86400
name: users
platform: linux
query: users
- description: Process events collected from the audit framework
- interval: 0
+ interval: 10
name: process_events
platform: linux
query: process_events
- description: Retrieves the list of the latest logins with PID, username and timestamp.
- interval: 0
+ interval: 3600
name: last
platform: linux
query: last
- description: Any processes that run with an LD_PRELOAD environment variable
- interval: 0
+ interval: 60
name: ld_preload
platform: linux
query: ld_preload
- - description: Records avg rate of process events since daemon started
- interval: 0
- name: process_rates
- platform: linux
- query: process_rates
snapshot: true
- description: Information about the system hardware and name
- interval: 0
+ interval: 86400
name: system_info
platform: linux
query: system_info
snapshot: true
- description: Returns the private keys in the users ~/.ssh directory and whether
or not they are encrypted
- interval: 0
+ interval: 86400
name: user_ssh_keys
platform: linux
query: user_ssh_keys
- description: Local system users.
- interval: 0
+ interval: 86400
name: users_snapshot
platform: linux
query: users_snapshot
snapshot: true
- description: DNS resolvers used by the host
- interval: 0
+ interval: 3600
name: dns_resolvers
platform: linux
query: dns_resolvers
- description: Retrieves information from the current kernel in the target system.
- interval: 0
+ interval: 86400
name: kernel_info
platform: linux
query: kernel_info
snapshot: true
- description: Linux kernel modules both loaded and within the load search path.
- interval: 0
+ interval: 86400
name: kernel_modules_snapshot
platform: linux
query: kernel_modules_snapshot
snapshot: true
- description: Generates an event if ld.so.preload is present - used by rootkits
such as Jynx
- interval: 0
+ interval: 3600
name: ld_so_preload_exists
platform: linux
query: ld_so_preload_exists
snapshot: true
- description: Records system/user time, db size, and many other system metrics
- interval: 0
+ interval: 1800
name: runtime_perf
platform: linux
query: runtime_perf
- description: Retrieves all the entries in the target system /etc/hosts file.
- interval: 0
+ interval: 86400
name: etc_hosts_snapshot
platform: linux
query: etc_hosts_snapshot
snapshot: true
- description: Snapshot query to retrieve the EC2 metadata for this endpoint
- interval: 0
+ interval: 86400
name: ec2_instance_metadata_snapshot
platform: linux
query: ec2_instance_metadata_snapshot
snapshot: true
- description: ""
- interval: 0
+ interval: 10
name: hardware_events
platform: linux
query: hardware_events
removed: false
- description: Information about memory usage on the system
- interval: 0
+ interval: 3600
name: memory_info
platform: linux
query: memory_info
- description: Displays information from /proc/stat file about the time the CPU
cores spent in different parts of the system
- interval: 0
+ interval: 3600
name: cpu_time
platform: linux
query: cpu_time
- description: Retrieves all the entries in the target system /etc/hosts file.
- interval: 0
+ interval: 3600
name: etc_hosts
platform: linux
query: etc_hosts
- description: Retrieves information from the Operating System where osquery is
currently running.
- interval: 0
+ interval: 86400
name: os_version
platform: linux
query: os_version
snapshot: true
- description: A snapshot of all processes running on the host. Useful for outlier
analysis.
- interval: 0
+ interval: 86400
name: processes_snapshot
platform: linux
query: processes_snapshot
snapshot: true
- description: Retrieves the current list of USB devices in the target system.
- interval: 0
+ interval: 120
name: usb_devices
platform: linux
query: usb_devices
- description: A line-delimited authorized_keys table.
- interval: 0
+ interval: 86400
name: authorized_keys
platform: linux
query: authorized_keys
+ - description: Display apt package manager sources.
+ interval: 86400
+ name: apt_sources
+ platform: ubuntu
+ query: apt_sources
+ snapshot: true
+ - description: Gather information about processes that are listening on a socket.
+ interval: 86400
+ name: listening_ports
+ platform: linux
+ query: listening_ports
+ snapshot: true
+ - description: Display yum package manager sources.
+ interval: 86400
+ name: yum_sources
+ platform: centos
+ query: yum_sources
+ snapshot: true
targets:
- labels: null
+ labels:
+ - Ubuntu Linux
+ - CentOS Linux
---
apiVersion: v1
kind: query
@@ -284,15 +281,6 @@ spec:
---
apiVersion: v1
kind: query
-spec:
- description: The percentage of total CPU time (system+user) consumed by osqueryd
- name: osquery_cpu_pct
- query: SELECT ((osqueryd_time*100)/(SUM(system_time) + SUM(user_time))) AS pct FROM
- processes, (SELECT (SUM(processes.system_time)+SUM(processes.user_time)) AS osqueryd_time
- FROM processes WHERE name='osqueryd');
----
-apiVersion: v1
-kind: query
spec:
description: Socket events collected from the audit framework
name: socket_events
@@ -329,7 +317,7 @@ kind: query
spec:
description: Record shell history for all users on system (instead of just root)
name: shell_history
- query: SELECT * FROM users JOIN shell_history USING (uid);
+ query: SELECT * FROM users CROSS JOIN shell_history USING (uid);
---
apiVersion: v1
kind: query
@@ -403,23 +391,6 @@ spec:
---
apiVersion: v1
kind: query
-spec:
- description: Records the system resources used by each query
- name: per_query_perf
- query: SELECT name, interval, executions, output_size, wall_time, (user_time/executions)
- AS avg_user_time, (system_time/executions) AS avg_system_time, average_memory
- FROM osquery_schedule;
----
-apiVersion: v1
-kind: query
-spec:
- description: Records avg rate of socket events since daemon started
- name: socket_rates
- query: SELECT COUNT(1) AS num, count(1)/s AS rate FROM socket_events, (SELECT (julianday('now')
- - 2440587.5)*86400.0 - start_time AS s FROM osquery_info LIMIT 1);
----
-apiVersion: v1
-kind: query
spec:
description: Local system users.
name: users
@@ -454,14 +425,6 @@ spec:
---
apiVersion: v1
kind: query
-spec:
- description: Records avg rate of process events since daemon started
- name: process_rates
- query: SELECT COUNT(1) AS num, count(1)/s AS rate FROM process_events, (SELECT (julianday('now')
- - 2440587.5)*86400.0 - start_time AS s FROM osquery_info LIMIT 1);
----
-apiVersion: v1
-kind: query
spec:
description: Information about the system hardware and name
name: system_info
@@ -473,7 +436,7 @@ spec:
description: Returns the private keys in the users ~/.ssh directory and whether
or not they are encrypted
name: user_ssh_keys
- query: SELECT * FROM users JOIN user_ssh_keys USING (uid);
+ query: SELECT * FROM users CROSS JOIN user_ssh_keys USING (uid);
---
apiVersion: v1
kind: query
@@ -593,4 +556,25 @@ kind: query
spec:
description: A line-delimited authorized_keys table.
name: authorized_keys
- query: SELECT * FROM users JOIN authorized_keys USING (uid);
+ query: SELECT * FROM users CROSS JOIN authorized_keys USING (uid);
+---
+apiVersion: v1
+kind: query
+spec:
+ description: Display apt package manager sources.
+ name: apt_sources
+ query: SELECT * FROM apt_sources;
+---
+apiVersion: v1
+kind: query
+spec:
+ description: Gather information about processes that are listening on a socket.
+ name: listening_ports
+ query: SELECT pid, port, processes.path, cmdline, cwd FROM listening_ports JOIN processes USING (pid) WHERE port!=0;
+---
+apiVersion: v1
+kind: query
+spec:
+ description: Display yum package manager sources.
+ name: yum_sources
+ query: SELECT name, baseurl, enabled, gpgcheck FROM yum_sources;
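Note: every query in the Linux server pack moves from interval: 0 to an explicit period (10 s for the audit-driven event tables up to 86400 s for daily snapshots), the osquery self-profiling queries are dropped, and apt_sources, yum_sources, and listening_ports are added. After editing pack YAML like this it can be re-applied through fleetctl; a sketch assuming the Servers pack is mounted under the same /packs path that so-fleet-setup uses for the Endpoints packs:

```bash
# Re-apply the updated Linux server pack through the Fleet API.
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Servers/Linux/osquery.yaml
```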
diff --git a/salt/fleet/files/packs/palantir/Fleet/Servers/options.yaml b/salt/fleet/files/packs/palantir/Fleet/Servers/options.yaml
index d1733f5b6..2329f085f 100644
--- a/salt/fleet/files/packs/palantir/Fleet/Servers/options.yaml
+++ b/salt/fleet/files/packs/palantir/Fleet/Servers/options.yaml
@@ -17,6 +17,7 @@ spec:
configuration:
- /etc/passwd
- /etc/shadow
+ - /etc/ld.so.preload
- /etc/ld.so.conf
- /etc/ld.so.conf.d/%%
- /etc/pam.d/%%
diff --git a/salt/fleet/files/packs/palantir/README.md b/salt/fleet/files/packs/palantir/README.md
index cade6dde4..a7ea61a37 100755
--- a/salt/fleet/files/packs/palantir/README.md
+++ b/salt/fleet/files/packs/palantir/README.md
@@ -10,11 +10,11 @@ exactly what we have done with our [unwanted-chrome-extensions](https://github.c
However, we have included additional query packs
that are more tailored to our specific environment that may be useful to some or at least serve as a reference to other organizations. osquery operates best when
operators have carefully considered the datasets to be collected and the potential use-cases for that data.
-* [performance-metrics.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/performance-metrics.conf)
-* [security-tooling-checks.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/security-tooling-checks.conf)
-* [windows-application-security.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-application-security.conf)
-* [windows-compliance.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-compliance.conf)
-* [windows-registry-monitoring.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-registry-monitoring.conf)
+* [performance-metrics.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/performance-metrics.conf)
+* [security-tooling-checks.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/security-tooling-checks.conf)
+* [windows-application-security.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-application-security.conf)
+* [windows-compliance.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-compliance.conf)
+* [windows-registry-monitoring.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-registry-monitoring.conf)
**Note**: We also utilize packs that are maintained in the official osquery project. In order to ensure you receive the most up to date version of the pack, please view them using the links below:
@@ -41,15 +41,15 @@ environment.
**Endpoints Configuration Overview**
* The configurations in this folder are meant for MacOS and Windows and the interval timings assume that these hosts are only online for ~8 hours per day
* The flags included in this configuration enable TLS client mode in osquery and assume it will be connected to a TLS server. We have also included non-TLS flagfiles for local testing.
-* File integrity monitoring on MacOS is enabled for specific files and directories defined in [osquery.conf](./Endpoints/MacOS/osquery.conf)
-* Events are disabled on Windows via the `--disable_events` flag in [osquery.flags](./Endpoints/Windows/osquery.flags). We use [Windows Event Forwarding](https://github.com/palantir/windows-event-forwarding) and don't have a need for osquery to process Windows event logs.
-* These configuration files utilize packs within the [packs](./Endpoints/packs) folder and may generate errors if started without them
+* File integrity monitoring on MacOS is enabled for specific files and directories defined in [osquery.conf](./Classic/Endpoints/MacOS/osquery.conf)
+* Events are disabled on Windows via the `--disable_events` flag in [osquery.flags](./Classic/Endpoints/Windows/osquery.flags). We use [Windows Event Forwarding](https://github.com/palantir/windows-event-forwarding) and don't have a need for osquery to process Windows event logs.
+* These configuration files utilize packs within the [packs](./Classic/Endpoints/packs) folder and may generate errors if started without them
**Servers Configuration Overview**
* This configuration assumes the destination operating system is Linux-based and that the hosts are online at all times
* Auditing mode is enabled for processes and network events. Ensure auditd is disabled or removed from the system where this will be running as it may conflict with osqueryd.
-* File integrity monitoring is enabled for specific files and directories defined in [osquery.conf](./Servers/Linux/osquery.conf)
-* Requires the [ossec-rootkit.conf](./Servers/Linux/packs/ossec-rootkit.conf) pack found to be located at `/etc/osquery/packs/ossec-rootkit.conf`
+* File integrity monitoring is enabled for specific files and directories defined in [osquery.conf](./Classic/Servers/Linux/osquery.conf)
+* Requires the [ossec-rootkit.conf](./Classic/Servers/Linux/packs/ossec-rootkit.conf) pack found to be located at `/etc/osquery/packs/ossec-rootkit.conf`
* The subscriber for `user_events` is disabled
## Quickstart - Classic
@@ -59,10 +59,10 @@ environment.
4. Logs are located in `/var/log/osquery` (Linux/MacOS) and `c:\ProgramData\osquery\logs` (Windows)
## Quickstart - Fleet
-1. Install Fleet version 2.0.0 or higher
-2. [Enroll hosts to your Fleet server](https://github.com/kolide/fleet/blob/master/docs/infrastructure/adding-hosts-to-fleet.md) by configuring the appropriate [flags](https://github.com/kolide/fleet/blob/master/tools/osquery/example_osquery.flags)
-2. [Configure the fleetctl utility](https://github.com/kolide/fleet/blob/master/docs/cli/setup-guide.md#fleetctl-setup) to communicate with your Fleet server
-3. Assuming you'd like to use the endpoint configs, you can use the commands below to apply them:
+1. Install Fleet version 2.0.0 or higher
+2. [Enroll hosts to your Fleet server](https://github.com/kolide/fleet/blob/master/docs/infrastructure/adding-hosts-to-fleet.md) by configuring the appropriate [flags](https://github.com/kolide/fleet/blob/master/tools/osquery/example_osquery.flags)
+3. [Configure the fleetctl utility](https://github.com/kolide/fleet/blob/master/docs/cli/setup-guide.md#fleetctl-setup) to communicate with your Fleet server
+4. Assuming you'd like to use the endpoint configs, you can use the commands below to apply them:
```
git clone https://github.com/palantir/osquery-configuration.git
@@ -79,7 +79,7 @@ The desired osquery directory structure for Linux, MacOS, and Windows is outline
**Linux**
```
$ git clone https://github.com/palantir/osquery-configuration.git
-$ cp -R osquery-configuration/Servers/Linux/* /etc/osquery
+$ cp -R osquery-configuration/Fleet/Servers/Linux/* /etc/osquery
$ sudo osqueryctl start
/etc/osquery
@@ -93,8 +93,8 @@ $ sudo osqueryctl start
**MacOS**
```
$ git clone https://github.com/palantir/osquery-configuration.git
-$ cp osquery-configuration/Endpoints/MacOS/* /var/osquery
-$ cp osquery-configuration/Endpoints/packs/* /var/osquery/packs
+$ cp osquery-configuration/Fleet/Endpoints/MacOS/* /var/osquery
+$ cp osquery-configuration/Fleet/Endpoints/packs/* /var/osquery/packs
$ mv /var/osquery/osquery_no_tls.flags /var/osquery/osquery.flags ## Non-TLS server testing
$ sudo osqueryctl start
@@ -113,8 +113,8 @@ $ sudo osqueryctl start
**Windows**
```
PS> git clone https://github.com/palantir/osquery-configuration.git
-PS> copy-item osquery-configuration/Endpoints/Windows/* c:\ProgramData\osquery
-PS> copy-item osquery-configuration/Endpoints/packs/* c:\ProgramData\osquery\packs
+PS> copy-item osquery-configuration/Fleet/Endpoints/Windows/* c:\ProgramData\osquery
+PS> copy-item osquery-configuration/Fleet/Endpoints/packs/* c:\ProgramData\osquery\packs
PS> copy-item c:\ProgramData\osquery\osquery_no_tls.flags c:\ProgramData\osquery\osquery.flags -force ## Non-TLS server testing
PS> start-service osqueryd
diff --git a/salt/fleet/files/scripts/so-fleet-setup b/salt/fleet/files/scripts/so-fleet-setup
index 5bab20922..5d7895f99 100644
--- a/salt/fleet/files/scripts/so-fleet-setup
+++ b/salt/fleet/files/scripts/so-fleet-setup
@@ -4,6 +4,12 @@
#so-fleet-setup.sh $FleetEmail
+# Enable Fleet
+echo "Starting Docker Containers..."
+salt-call state.apply mysql queue=True >> /root/fleet-setup.log
+salt-call state.apply fleet queue=True >> /root/fleet-setup.log
+salt-call state.apply redis queue=True >> /root/fleet-setup.log
+
if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
echo "so-fleet container not running... Exiting..."
exit 1
@@ -15,11 +21,11 @@ docker exec so-fleet /bin/ash -c "echo {{ MAIN_IP }} {{ MAIN_HOSTNAME }} >> /et
docker exec so-fleet fleetctl config set --address https://{{ MAIN_HOSTNAME }}:443 --tls-skip-verify --url-prefix /fleet
docker exec so-fleet fleetctl setup --email $1 --password $initpw
-docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/options.yaml
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/hh/hhdefault.yml
docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml; do fleetctl apply -f "$pack"; done'
+docker exec so-fleet fleetctl apply -f /packs/hh/osquery.conf
# Enable Fleet
@@ -34,6 +40,7 @@ sleep 120
echo "Installing launcher via salt..."
salt-call state.apply fleet.install_package queue=True >> /root/fleet-setup.log
+salt-call state.apply filebeat queue=True >> /root/fleet-setup.log
echo "Fleet Setup Complete - Login here: https://{{ MAIN_HOSTNAME }}"
echo "Your username is $2 and your password is $initpw"
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index 9a6ba0330..0a916ae3d 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -1,6 +1,6 @@
-{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', None) -%}
-{%- set FLEETPASS = salt['pillar.get']('auth:fleet', None) -%}
-{%- set FLEETJWT = salt['pillar.get']('auth:fleet_jwt', None) -%}
+{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
+{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
+{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MAINIP = salt['pillar.get']('node:mainip') %}
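Since `fleet/init.sls` now pulls its credentials from the renamed `secrets` pillar, a quick hedged check that the new keys actually render on the master (assuming `secrets.sls` has been created by setup) is:

```
# Each command prints the value sourced from /opt/so/saltstack/pillar/secrets.sls,
# or nothing if the key is missing from the pillar.
salt-call pillar.get secrets:mysql
salt-call pillar.get secrets:fleet
salt-call pillar.get secrets:fleet_jwt
```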
diff --git a/salt/healthcheck/init.sls b/salt/healthcheck/init.sls
index 627603099..94f8028ba 100644
--- a/salt/healthcheck/init.sls
+++ b/salt/healthcheck/init.sls
@@ -1,9 +1,6 @@
-### This state isn't used for anything. It was written to handle healthcheck scheduling,
-### but we handle that with beacons now.
-
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
-{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %}
+{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 300) %}
{% if CHECKS and ENABLED %}
{% set STATUS = ['present','enabled'] %}
@@ -21,7 +18,7 @@ healthcheck_schedule_{{ STATUS[0] }}:
schedule.{{ STATUS[0] }}:
- name: healthcheck
- function: healthcheck.run
- - minutes: {{ SCHEDULE }}
+ - seconds: {{ SCHEDULE }}
healthcheck_schedule_{{ STATUS[1] }}:
schedule.{{ STATUS[1] }}:
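With the interval now expressed in seconds (default 300), a hedged way to confirm a minion picked up the new schedule entry is to list its scheduler jobs:

```
# schedule.list shows jobs managed by schedule.present; the healthcheck job
# should report a 300-second interval when the pillar keeps the new default.
salt-call schedule.list
```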
diff --git a/salt/logstash/pipelines/config/so/9000_output_bro.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
similarity index 73%
rename from salt/logstash/pipelines/config/so/9000_output_bro.conf.jinja
rename to salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
index 9ce08edf8..6def12650 100644
--- a/salt/logstash/pipelines/config/so/9000_output_bro.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
@@ -10,17 +10,16 @@
filter {
- if "zeek" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
+ if [module] =~ "zeek" {
mutate {
##add_tag => [ "conf_file_9000"]
}
}
}
output {
- if "zeek" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
-# stdout { codec => rubydebug }
+ if [module] =~ "zeek" {
elasticsearch {
- pipeline => "%{event_type}"
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-zeek-%{+YYYY.MM.dd}"
template_name => "so-zeek"
diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
index ca9c90215..d09aae10b 100644
--- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
@@ -9,11 +9,12 @@
output {
- if "osquery" in [tags] {
+ if [module] =~ "osquery" {
elasticsearch {
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-osquery-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
}
}
-}
\ No newline at end of file
+}
diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
index 7b587242a..640c50f39 100644
--- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
@@ -9,16 +9,16 @@
# Last Update: 12/9/2016
filter {
- if [event_type] == "suricata" and "test_data" not in [tags] {
+ if [module] == "suricata" {
mutate {
##add_tag => [ "conf_file_9400"]
}
}
}
output {
- if [event_type] == "suricata" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
+ if [module] =~ "suricata" {
elasticsearch {
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ids-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
index 53dae8825..93bffca7d 100644
--- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
@@ -9,7 +9,7 @@
# Last Update: 9/19/2018
filter {
- if [event_type] =~ "ossec" {
+ if [module] =~ "ossec" {
mutate {
##add_tag => [ "conf_file_9600"]
}
@@ -17,9 +17,9 @@ filter {
}
output {
- if [event_type] =~ "ossec" or "ossec" in [tags] {
+ if [module] =~ "ossec" {
elasticsearch {
- pipeline => "%{event_type}"
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ossec-%{+YYYY.MM.dd}"
template_name => "so-common"
diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
index 5da6d0b12..1e192a8ee 100644
--- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
@@ -10,7 +10,7 @@
filter {
- if [event_type] =~ "strelka" {
+ if [module] =~ "strelka" {
mutate {
##add_tag => [ "conf_file_9000"]
}
@@ -19,6 +19,7 @@ filter {
output {
if [event_type] =~ "strelka" {
elasticsearch {
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-strelka-%{+YYYY.MM.dd}"
template_name => "so-common"
diff --git a/salt/logstash/pipelines/templates/so/so-common-template.json b/salt/logstash/pipelines/templates/so/so-common-template.json
index 43fef682a..80983342a 100644
--- a/salt/logstash/pipelines/templates/so/so-common-template.json
+++ b/salt/logstash/pipelines/templates/so/so-common-template.json
@@ -17,6 +17,10 @@
"@version":{
"type":"keyword"
},
+ "osquery":{
+ "type":"object",
+ "dynamic": true
+ },
"geoip":{
"dynamic":true,
"properties":{
diff --git a/salt/master/files/add_minion.sh b/salt/master/files/add_minion.sh
new file mode 100755
index 000000000..220317193
--- /dev/null
+++ b/salt/master/files/add_minion.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+# This script adds pillar and schedule files securely
+
+MINION=$1
+
+ echo "Adding $1"
+ cp /tmp/$MINION/pillar/$MINION.sls /opt/so/saltstack/pillar/minions/
+ cp /tmp/$MINION/schedules/* /opt/so/saltstack/salt/patch/os/schedules/
+ rm -rf /tmp/$MINION
\ No newline at end of file
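A hypothetical invocation of the new helper from a sensor during setup (the real call appears in `copy_minion_tmp_files` later in this patch, after the pillar and schedule files have been staged under `/tmp/<minion-id>` on the master):

```
# The minion's files were already copied to /tmp/$MINION_ID via scp; the helper
# then moves them into the saltstack tree and removes the staging directory.
ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/salt/master/files/add_minion.sh "$MINION_ID"
```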
diff --git a/salt/master/files/registry/scripts/so-docker-download b/salt/master/files/registry/scripts/so-docker-download
index 488b45886..f6ca8f4e1 100644
--- a/salt/master/files/registry/scripts/so-docker-download
+++ b/salt/master/files/registry/scripts/so-docker-download
@@ -17,8 +17,6 @@ TRUSTED_CONTAINERS=( \
"so-thehive-es:$VERSION" \
"so-wazuh:$VERSION" \
"so-kibana:$VERSION" \
-"so-auth-ui:$VERSION" \
-"so-auth-api:$VERSION" \
"so-elastalert:$VERSION" \
"so-navigator:$VERSION" \
"so-filebeat:$VERSION" \
diff --git a/salt/mysql/etc/mypass b/salt/mysql/etc/mypass
index 2fb9844db..f5f781c10 100644
--- a/salt/mysql/etc/mypass
+++ b/salt/mysql/etc/mypass
@@ -1,2 +1,2 @@
-{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', None) -%}
+{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{{ MYSQLPASS }}
diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls
index 981d27a73..8c5b91b99 100644
--- a/salt/mysql/init.sls
+++ b/salt/mysql/init.sls
@@ -1,4 +1,4 @@
-{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', None) %}
+{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json
index be2577c0a..81776b021 100644
--- a/salt/pcap/files/sensoroni.json
+++ b/salt/pcap/files/sensoroni.json
@@ -1,7 +1,7 @@
{%- set MASTER = grains['master'] -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
- "logFilename": "/opt/sensoroni/log/sensoroni.log",
+ "logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"debug",
"agent": {
"pollIntervalMs": 10000,
diff --git a/salt/reactor/zeek.sls b/salt/reactor/zeek.sls
index c22d6f94d..f2e26b095 100644
--- a/salt/reactor/zeek.sls
+++ b/salt/reactor/zeek.sls
@@ -6,7 +6,7 @@ local = salt.client.LocalClient()
def run():
minionid = data['id']
- zeek_restart = data['zeek_restart']
+ zeek_restart = data['data']['zeek_restart']
logging.info('zeek_reactor: zeek_need_restarted:%s on:%s' % (zeek_restart, minionid))
if zeek_restart:
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index 1de6b0c13..660b85ef5 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -105,4 +105,10 @@ strelka_filestream:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka
- name: so-strelka-filestream
- - command: strelka-filestream
+ - command: strelka-filestream
+
+strelka_zeek_extracted_sync:
+ cron.present:
+ - user: socore
+ - name: mv /nsm/zeek/extracted/complete/* /nsm/strelka
+ - minute: '*'
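The new `cron.present` state hands completed Zeek file extractions to Strelka once a minute. A hedged way to verify the rendered crontab entry for the `socore` user is:

```
# Lists socore's crontab and filters for the entry created by
# strelka_zeek_extracted_sync; an empty result means the state has not applied yet.
crontab -l -u socore | grep -F '/nsm/zeek/extracted/complete'
```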
diff --git a/salt/top.sls b/salt/top.sls
index 59b95467c..1dc06098e 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -37,11 +37,14 @@ base:
- firewall
- pcap
- suricata
- - salt.beacons
+ - healthcheck
{%- if BROVER != 'SURICATA' %}
- zeek
{%- endif %}
- wazuh
+ {%- if STRELKA %}
+ - strelka
+ {%- endif %}
- filebeat
{%- if FLEETMASTER or FLEETNODE %}
- fleet.install_package
@@ -57,8 +60,7 @@ base:
- soc
- firewall
- idstools
- - auth
- - salt.beacons
+ - healthcheck
{%- if FLEETMASTER or FLEETNODE %}
- mysql
{%- endif %}
diff --git a/setup/so-functions b/setup/so-functions
index ee6fcbc89..d48ecff0e 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -29,9 +29,9 @@ accept_salt_key_local() {
accept_salt_key_remote() {
echo "Accept the key remotely on the master" >> $SETUPLOG 2>&1
# Delete the key just in case.
- ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -d $MINION_ID -y
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo salt-key -d $MINION_ID -y
salt-call state.apply ca
- ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -a $MINION_ID -y
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo salt-key -a $MINION_ID -y
}
@@ -65,8 +65,23 @@ add_socore_user_master() {
fi
groupadd --gid 939 socore
$ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
- # Set the password for socore that we got during setup
- echo socore:$COREPASS1 | chpasswd --crypt-method=SHA512
+
+}
+
+add_soremote_user_master() {
+
+ echo "Add soremote on the master" >>~/sosetup.log 2>&1
+ # Add user "soremote" to the master. This will be for things like accepting keys.
+ if [ $OS == 'centos' ]; then
+ local ADDUSER=adduser
+ else
+ local ADDUSER=useradd
+ fi
+ groupadd --gid 947 soremote
+ $ADDUSER --uid 947 --gid 947 soremote
+
+ # Set the password for soremote that we got during setup
+ echo soremote:$SOREMOTEPASS1 | chpasswd --crypt-method=SHA512
}
@@ -101,17 +116,17 @@ add_web_user() {
echo "Add user result: $?"
}
-# Create an auth pillar so that passwords survive re-install
-auth_pillar(){
+# Create a secrets pillar so that passwords survive re-install
+secrets_pillar(){
- if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
- echo "Creating Auth Pillar" >> $SETUPLOG 2>&1
+ if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
+ echo "Creating Secrets Pillar" >> $SETUPLOG 2>&1
mkdir -p /opt/so/saltstack/pillar
- echo "auth:" >> /opt/so/saltstack/pillar/auth.sls
- echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/auth.sls
- echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/auth.sls
- echo " fleet_jwt: $FLEETJWT" >> /opt/so/saltstack/pillar/auth.sls
- echo " fleet_enroll-secret: False" >> /opt/so/saltstack/pillar/auth.sls
+ echo "secrets:" >> /opt/so/saltstack/pillar/secrets.sls
+ echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/secrets.sls
+ echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/secrets.sls
+ echo " fleet_jwt: $FLEETJWT" >> /opt/so/saltstack/pillar/secrets.sls
+ echo " fleet_enroll-secret: False" >> /opt/so/saltstack/pillar/secrets.sls
fi
}
@@ -219,7 +234,7 @@ check_network_manager_conf() {
{
mv "$gmdconf" "${gmdconf}.bak"
touch "$gmdconf"
- systemctl restart network-manager
+ systemctl restart NetworkManager
} >> $SETUPLOG 2>&1
fi
@@ -232,9 +247,9 @@ check_network_manager_conf() {
fi
}
-check_socore_pass() {
+check_soremote_pass() {
- if [ $COREPASS1 == $COREPASS2 ]; then
+ if [ $SOREMOTEPASS1 == $SOREMOTEPASS2 ]; then
SCMATCH=yes
else
whiptail_passwords_dont_match
@@ -288,10 +303,10 @@ configure_minion() {
echo "mysql.host: '$MAINIP'" >> /etc/salt/minion
echo "mysql.port: 3306" >> /etc/salt/minion
echo "mysql.user: 'root'" >> /etc/salt/minion
- if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
echo "mysql.pass: '$MYSQLPASS'" >> /etc/salt/minion
else
- OLDPASS=$(cat /opt/so/saltstack/pillar/auth.sls | grep mysql | awk {'print $2'})
+ OLDPASS=$(cat /opt/so/saltstack/pillar/secrets.sls | grep mysql | awk {'print $2'})
echo "mysql.pass: '$OLDPASS'" >> /etc/salt/minion
fi
elif [ $TYPE == 'helix' ]; then
@@ -337,8 +352,12 @@ copy_minion_tmp_files() {
fi
else
echo "scp pillar and salt files in $TMP to master /opt/so/saltstack"
- scp -prv -i /root/.ssh/so.key $TMP/pillar/* socore@$MSRV:/opt/so/saltstack/pillar >> $SETUPLOG 2>&1
- scp -prv -i /root/.ssh/so.key $TMP/salt/* socore@$MSRV:/opt/so/saltstack/salt >> $SETUPLOG 2>&1
+ ssh -i /root/.ssh/so.key soremote@$MSRV mkdir -p /tmp/$MINION_ID/pillar >> $SETUPLOG 2>&1
+ ssh -i /root/.ssh/so.key soremote@$MSRV mkdir -p /tmp/$MINION_ID/schedules >> $SETUPLOG 2>&1
+ scp -prv -i /root/.ssh/so.key $TMP/pillar/minions/* soremote@$MSRV:/tmp/$MINION_ID/pillar/ >> $SETUPLOG 2>&1
+ scp -prv -i /root/.ssh/so.key $TMP/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules >> $SETUPLOG 2>&1
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/salt/master/files/add_minion.sh $MINION_ID >> $SETUPLOG 2>&1
+
fi
}
@@ -352,7 +371,7 @@ copy_ssh_key() {
chown -R $SUDO_USER:$SUDO_USER /root/.ssh
echo "Copying the SSH key to the master"
#Copy the key over to the master
- ssh-copy-id -f -i /root/.ssh/so.key socore@$MSRV
+ ssh-copy-id -f -i /root/.ssh/so.key soremote@$MSRV
}
@@ -376,7 +395,7 @@ create_sensor_bond() {
ethernet.mtu $MTU \
connection.autoconnect "yes" >> $SETUPLOG 2>&1
- for BNIC in "${BNICS[@]}"; do
+ for BNIC in ${BNICS[@]}; do
BONDNIC="$(echo -e "${BNIC}" | tr -d '"')" # Strip the quotes from the NIC names
# Check if specific offload features are able to be disabled
@@ -439,8 +458,8 @@ detect_os() {
fi
# Install network manager so we can do interface stuff
apt-get install -y network-manager
- /bin/systemctl enable network-manager
- /bin/systemctl start network-manager
+ /bin/systemctl enable NetworkManager
+ /bin/systemctl start NetworkManager
else
echo "We were unable to determine if you are using a supported OS." >> $SETUPLOG 2>&1
exit
@@ -467,7 +486,7 @@ disable_onion_user() {
}
disable_misc_network_features() {
- for UNUSED_NIC in "${FNICS[@]}"; do
+ for UNUSED_NIC in ${FNICS[@]}; do
# Disable DHCPv4/v6 and autoconnect
nmcli con mod "$UNUSED_NIC" \
ipv4.method disabled \
@@ -551,8 +570,6 @@ docker_seed_registry() {
if [ $INSTALLTYPE != 'HELIXSENSOR' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$VERSION" \
- "so-auth-api:$VERSION" \
- "so-auth-ui:$VERSION" \
"so-core:$VERSION" \
"so-thehive-cortex:$VERSION" \
"so-curator:$VERSION" \
@@ -1018,6 +1035,7 @@ reserve_group_ids() {
groupadd -g 941 stenographer
groupadd -g 945 ossec
groupadd -g 946 cyberchef
+ groupadd -g 947 soremote
}
@@ -1029,6 +1047,7 @@ saltify() {
if [ $INSTALLTYPE == 'MASTER' ] || [ $INSTALLTYPE == 'EVAL' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ] || [ $INSTALLTYPE == 'MASTERSEARCH' ]; then
reserve_group_ids
+ yum -y install epel-release
yum -y install wget https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
cp /etc/yum.repos.d/salt-py3-latest.repo /etc/yum.repos.d/salt-py3-2019-2.repo
sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-py3-2019-2.repo
@@ -1262,7 +1281,7 @@ EOF
# Copy down the gpg keys and install them from the master
mkdir $TMP/gpg
echo "scp the gpg keys and install them from the master"
- scp -v -i /root/.ssh/so.key socore@$MSRV:/opt/so/gpg/* $TMP/gpg
+ scp -v -i /root/.ssh/so.key soremote@$MSRV:/opt/so/gpg/* $TMP/gpg
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH"
apt-key add $TMP/gpg/SALTSTACK-GPG-KEY.pub
apt-key add $TMP/gpg/GPG-KEY-WAZUH
@@ -1462,27 +1481,27 @@ set_initial_firewall_policy() {
fi
if [ $INSTALLTYPE == 'SENSOR' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
fi
if [ $INSTALLTYPE == 'SEARCHNODE' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
fi
if [ $INSTALLTYPE == 'HEAVYNODE' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
fi
if [ $INSTALLTYPE == 'FLEET' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key soremote@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
fi
if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
@@ -1558,13 +1577,14 @@ set_version() {
update_sudoers() {
- if ! grep -qE '^socore\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
- # Update Sudoers so that socore can accept keys without a password
- echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
- echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
- echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | tee -a /etc/sudoers
+ if ! grep -qE '^soremote\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
+ # Update Sudoers so that soremote can accept keys without a password
+ echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
else
- echo "User socore already granted sudo privileges"
+ echo "User soremote already granted sudo privileges"
fi
}
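Because `soremote` is now limited to a short list of NOPASSWD commands, a hedged post-install check that the grants were appended correctly, and that `/etc/sudoers` still parses, is:

```
# visudo -c validates sudoers syntax; sudo -l -U lists what soremote may run
# without a password, which should match the four commands echoed above.
visudo -c
sudo -l -U soremote
```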
diff --git a/setup/so-setup b/setup/so-setup
index 1b08132c5..5a386793b 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -71,14 +71,6 @@ if (whiptail_you_sure) ; then
# Set management nic
whiptail_management_nic
-# whiptail_create_socore_user
-# SCMATCH=no
-# while [ $SCMATCH != yes ]; do
-# whiptail_create_socore_user_password1
-# whiptail_create_socore_user_password2
-# check_socore_pass
-# done
-
else
# Set the hostname
@@ -169,7 +161,7 @@ if (whiptail_you_sure) ; then
add_admin_user
disable_onion_user
fi
- #add_socore_user_master
+
# Install salt and dependencies
{
sleep 0.5
@@ -291,13 +283,13 @@ if (whiptail_you_sure) ; then
fi
fi
- # Get a password for the socore user
- whiptail_create_socore_user
+ # Get a password for the soremote user
+ whiptail_create_soremote_user
SCMATCH=no
while [ $SCMATCH != yes ]; do
- whiptail_create_socore_user_password1
- whiptail_create_socore_user_password2
- check_socore_pass
+ whiptail_create_soremote_user_password1
+ whiptail_create_soremote_user_password2
+ check_soremote_pass
done
# Get a password for the web admin user
@@ -314,7 +306,7 @@ if (whiptail_you_sure) ; then
set_hostname
set_version
generate_passwords
- auth_pillar
+ secrets_pillar
clear_master
mkdir -p /nsm
get_filesystem_root
@@ -331,10 +323,8 @@ if (whiptail_you_sure) ; then
fi
# Add the user so we can sit back and relax
- #echo ""
- #echo "**** Please set a password for socore. You will use this password when setting up other Nodes/Sensors"
- #echo ""
add_socore_user_master
+ add_soremote_user_master
# Install salt and dependencies
{
@@ -574,13 +564,13 @@ if (whiptail_you_sure) ; then
if [ $INSTALLTYPE == 'MASTERSEARCH' ]; then
# Find out how to handle updates
whiptail_master_updates
- # Get a password for the socore user
- whiptail_create_socore_user
+ # Get a password for the soremote user
+ whiptail_create_soremote_user
SCMATCH=no
while [ $SCMATCH != yes ]; do
- whiptail_create_socore_user_password1
- whiptail_create_socore_user_password2
- check_socore_pass
+ whiptail_create_soremote_user_password1
+ whiptail_create_soremote_user_password2
+ check_soremote_pass
done
fi
# Get a password for the web admin user
@@ -596,7 +586,7 @@ if (whiptail_you_sure) ; then
set_hostname
set_version
generate_passwords
- auth_pillar
+ secrets_pillar
clear_master
mkdir -p /nsm
get_filesystem_root
@@ -610,6 +600,7 @@ if (whiptail_you_sure) ; then
# Add the user so we can sit back and relax
add_socore_user_master
+ add_soremote_user_master
{
sleep 0.5
if [ $INSTALLTYPE == 'EVAL' ]; then
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 5aa48ac2b..29ff43be5 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -132,26 +132,26 @@ whiptail_create_admin_user_password2() {
}
-whiptail_create_socore_user() {
+whiptail_create_soremote_user() {
- whiptail --title "Security Onion Setup" --msgbox "Set a password for the socore user. This account is used for adding sensors remotely." 8 75
+ whiptail --title "Security Onion Setup" --msgbox "Set a password for the soremote user. This account is used for adding sensors remotely." 8 75
}
-whiptail_create_socore_user_password1() {
+whiptail_create_soremote_user_password1() {
- COREPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
- "Enter a password for user socore" 10 75 3>&1 1>&2 2>&3)
+ SOREMOTEPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Enter a password for user soremote" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
-whiptail_create_socore_user_password2() {
+whiptail_create_soremote_user_password2() {
- COREPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
- "Re-enter a password for user socore" 10 75 3>&1 1>&2 2>&3)
+ SOREMOTEPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Re-enter a password for user soremote" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
diff --git a/upgrade/so-update-functions b/upgrade/so-update-functions
index ef7bb4597..4f0e48f66 100644
--- a/upgrade/so-update-functions
+++ b/upgrade/so-update-functions
@@ -35,8 +35,6 @@ HOSTNAME=$(hostname)
if [ $MASTERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$BUILD$UPDATEVERSION" \
- "so-auth-api:$BUILD$UPDATEVERSION" \
- "so-auth-ui:$BUILD$UPDATEVERSION" \
"so-core:$BUILD$UPDATEVERSION" \
"so-thehive-cortex:$BUILD$UPDATEVERSION" \
"so-curator:$BUILD$UPDATEVERSION" \