Move files out of common

Mike Reeves
2023-05-02 09:40:02 -04:00
parent 2d4f4791e0
commit e60e21d9ff
47 changed files with 0 additions and 325 deletions


@@ -1,11 +0,0 @@
#!/usr/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
echo "Please use the Configuration section in SOC to allow hosts"
echo ""
echo "If you need command line options on adding hosts please run so-firewall"


@@ -1,15 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst


@@ -1,138 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import ipaddress
import textwrap
import os
import subprocess
import sys
import argparse
import re
from lxml import etree as ET
from xml.dom import minidom
LOCAL_SALT_DIR='/opt/so/saltstack/local'
VALID_ROLES = {
'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
}
def validate_ip_cidr(ip_cidr: str) -> bool:
try:
ipaddress.ip_address(ip_cidr)
except ValueError:
try:
ipaddress.ip_network(ip_cidr)
except ValueError:
return False
return True
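# Illustrative behavior of validate_ip_cidr:
#   validate_ip_cidr('10.10.10.10')  -> True  (single address)
#   validate_ip_cidr('10.10.0.0/16') -> True  (CIDR network)
#   validate_ip_cidr('10.10.0.0/99') -> False (invalid prefix length)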
def role_prompt() -> str:
print()
print('Choose the role for the IP or Range you would like to deny')
print()
for role in VALID_ROLES:
print(f'[{role}] - {VALID_ROLES[role]["desc"]}')
print()
role = input('Please enter your selection: ')
if role in VALID_ROLES.keys():
return VALID_ROLES[role]['role']
else:
print(f'Invalid role \'{role}\', please try again.', file=sys.stderr)
sys.exit(1)
def ip_prompt() -> str:
ip = input('Enter a single ip address or range to deny (ex: 10.10.10.10 or 10.10.0.0/16): ')
if validate_ip_cidr(ip):
return ip
else:
print(f'Invalid IP address or CIDR block \'{ip}\', please try again.', file=sys.stderr)
sys.exit(1)
def apply(role: str, ip: str) -> int:
firewall_cmd = ['so-firewall', 'excludehost', role, ip]
salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'firewall', 'queue=True']
print(f'Removing {ip} from the {role} role. This can take a few seconds...')
cmd = subprocess.run(firewall_cmd)
    if cmd.returncode == 0:
        cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
    return cmd.returncode
def main():
if os.geteuid() != 0:
print('You must run this script as root', file=sys.stderr)
sys.exit(1)
main_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(f'''\
additional information:
To use this script in interactive mode call it with no arguments
'''
))
group = main_parser.add_argument_group(title='roles')
group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")
ip_g = main_parser.add_argument_group(title='allow')
ip_g.add_argument('-i', help="IP or CIDR block to disallow connections from, requires at least one role argument", metavar='', dest='ip')
args = main_parser.parse_args(sys.argv[1:])
if args.roles is None:
role = role_prompt()
ip = ip_prompt()
try:
return_code = apply(role, ip)
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            return_code = getattr(e, 'errno', 1)
sys.exit(return_code)
elif args.roles is not None and args.ip is None:
if os.environ.get('IP') is None:
main_parser.print_help()
sys.exit(1)
else:
args.ip = os.environ['IP']
if validate_ip_cidr(args.ip):
try:
for role in args.roles:
return_code = apply(role, args.ip)
if return_code > 0:
break
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            return_code = getattr(e, 'errno', 1)
else:
print(f'Invalid IP address or CIDR block \'{args.ip}\', please try again.', file=sys.stderr)
return_code = 1
sys.exit(return_code)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)


@@ -1,14 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
. /usr/sbin/so-image-common
require_manager
update_docker_containers "refresh"


@@ -1,34 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
#so-elastic-agent-gen-installers $FleetHost $EnrollmentToken
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints")) | .api_key')
#FLEETHOST=$(lookup_pillar "server:url" "elasticfleet")
FLEETHOST="{{ GLOBALS.manager_ip }}"
#FLEETHOST=$1
#ENROLLMENTOKEN=$2
CONTAINERGOOS=( "linux" "darwin" "windows" )
#rm -rf /tmp/elastic-agent-workspace
#mkdir -p /tmp/elastic-agent-workspace
for OS in "${CONTAINERGOOS[@]}"
do
printf "\n\nGenerating $OS Installer..."
#cp /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ \
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
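# The compiled installers land in the host directory mounted at /output/ above
# (/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/), one binary per OS.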
printf "\n $OS Installer Generated..."
done


@@ -1,144 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
source $(dirname $0)/so-common
require_manager
user=$1
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticAuthPillarFile=${ELASTIC_AUTH_PILLAR_FILE:-/opt/so/saltstack/local/pillar/elasticsearch/auth.sls}
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <user>"
echo ""
echo " where <user> is one of the following:"
echo ""
echo " all: Reset the password for the so_elastic, so_kibana, so_logstash, so_beats, and so_monitor users"
echo " so_elastic: Reset the password for the so_elastic user"
echo " so_kibana: Reset the password for the so_kibana user"
echo " so_logstash: Reset the password for the so_logstash user"
echo " so_beats: Reset the password for the so_beats user"
echo " so_monitor: Reset the password for the so_monitor user"
echo ""
exit 1
fi
# function to create a lock so that the so-user sync cronjob can't run while this is running
function lock() {
# Obtain file descriptor lock
exec 99>/var/tmp/so-user.lock || fail "Unable to create lock descriptor; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
flock -w 10 99 || fail "Another process is using so-user; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
trap 'rm -f /var/tmp/so-user.lock' EXIT
}
function unlock() {
rm -f /var/tmp/so-user.lock
}
function fail() {
msg=$1
echo "$1"
exit 1
}
function removeSingleUserPass() {
local user=$1
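# Match the "user: <name>" line, pull the following line into the pattern space (N),
# and delete both when that second line is the stored "pass:" entry for this user.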
sed -i '/user: '"${user}"'/{N;/pass: /d}' "${elasticAuthPillarFile}"
}
function removeAllUserPass() {
local userList=("so_elastic" "so_kibana" "so_logstash" "so_beats" "so_monitor")
for u in ${userList[@]}; do
removeSingleUserPass "$u"
done
}
function removeElasticUsersFile() {
rm -f "$elasticUsersFile"
}
function createElasticAuthPillar() {
salt-call state.apply elasticsearch.auth queue=True
}
# Disable highstate so a highstate cannot start while this script is running.
# Also disable salt.minion-state-apply-test so the so-salt-minion-check cronjob can restart the salt-minion service if needed.
function disableSaltStates() {
printf "\nDisabling salt.minion-state-apply-test and highstate from running.\n\n"
salt-call state.disable salt.minion-state-apply-test
salt-call state.disable highstate
}
function enableSaltStates() {
printf "\nEnabling salt.minion-state-apply-test and highstate.\n\n"
salt-call state.enable salt.minion-state-apply-test
salt-call state.enable highstate
}
function killAllSaltJobs() {
printf "\nKilling all running salt jobs.\n\n"
salt-call saltutil.kill_all_jobs
}
function soUserSync() {
# apply this state to update /opt/so/saltstack/local/salt/elasticsearch/curl.config on the manager
salt-call state.sls_id elastic_curl_config_distributed manager queue=True
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' saltutil.kill_all_jobs
# apply this state to get the curl.config
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.sls_id elastic_curl_config common queue=True
$(dirname $0)/so-user sync
printf "\nApplying logstash state to the appropriate nodes.\n\n"
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply logstash queue=True
printf "\nApplying kibana state to the appropriate nodes.\n\n"
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch' state.apply kibana queue=True
printf "\nApplying curator state to the appropriate nodes.\n\n"
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply curator queue=True
}
function highstateManager() {
killAllSaltJobs
printf "\nRunning highstate on the manager to finalize password reset.\n\n"
salt-call state.highstate -linfo queue=True
}
case "${user}" in
so_elastic | so_kibana | so_logstash | so_beats | so_monitor)
lock
killAllSaltJobs
disableSaltStates
removeSingleUserPass "$user"
createElasticAuthPillar
removeElasticUsersFile
unlock
soUserSync
enableSaltStates
highstateManager
;;
all)
lock
killAllSaltJobs
disableSaltStates
removeAllUserPass
createElasticAuthPillar
removeElasticUsersFile
unlock
soUserSync
enableSaltStates
highstateManager
;;
*)
fail "Unsupported user: $user"
;;
esac
exit 0


@@ -1,154 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
. /usr/sbin/so-common
SKIP=0
#########################################
# Options
#########################################
usage()
{
cat <<EOF
Security Onion Elastic Clear
Options:
-h This message
-c Delete Cases data (skips the confirmation prompt)
-d Don't stop or restart services
-e Delete Elastalert data (skips the confirmation prompt)
-l Delete log data (skips the confirmation prompt)
-y Skip interactive mode (deletes Cases, Elastalert, and log data)
EOF
}
while getopts "hcdely" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
c)
DELETE_CASES_DATA=1
SKIP=1
;;
d)
DONT_STOP_SERVICES=1
SKIP=1
;;
e)
DELETE_ELASTALERT_DATA=1
SKIP=1
;;
l)
DELETE_LOG_DATA=1
SKIP=1
;;
y)
DELETE_CASES_DATA=1
DELETE_ELASTALERT_DATA=1
DELETE_LOG_DATA=1
SKIP=1
;;
*)
usage
exit 0
;;
esac
done
if [ $SKIP -ne 1 ]; then
# List indices
echo
curl -K /opt/so/conf/elasticsearch/curl.config -k -L https://{{ NODEIP }}:9200/_cat/indices?v
echo
# Inform user we are about to delete all data
echo
echo "This script will delete all data (documents, indices, etc.) in the Elasticsearch database."
echo
echo "If you would like to proceed, please type "AGREE" and hit ENTER."
echo
# Read user input
read INPUT
if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
fi
if [ -z "$DONT_STOP_SERVICES" ]; then
# Stop Elastic Agent
for i in $(pgrep elastic-agent | grep -v grep); do
kill -9 $i;
done
# Check to see if Elastic Fleet, Logstash, Elastalert are running
#EF_ENABLED=$(so-status | grep elastic-fleet)
LS_ENABLED=$(so-status | grep logstash)
EA_ENABLED=$(so-status | grep elastalert)
#if [ ! -z "$EF_ENABLED" ]; then
# /usr/sbin/so-elastic-fleet-stop
#fi
if [ ! -z "$LS_ENABLED" ]; then
/usr/sbin/so-logstash-stop
fi
if [ ! -z "$EA_ENABLED" ]; then
/usr/sbin/so-elastalert-stop
fi
fi
if [ ! -z "$DELETE_CASES_DATA" ]; then
# Delete Cases data
echo "Deleting Cases data..."
INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "so-case")
for INDX in ${INDXS}
do
echo "Deleting $INDX"
/usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
done
fi
# Delete Elastalert data
if [ ! -z "$DELETE_ELASTALERT_DATA" ]; then
# Delete Elastalert data
echo "Deleting Elastalert data..."
INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "elastalert")
for INDX in ${INDXS}
do
echo "Deleting $INDX"
/usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
done
fi
# Delete log data
if [ ! -z "$DELETE_LOG_DATA" ]; then
echo "Deleting log data ..."
DATASTREAMS=$(/usr/sbin/so-elasticsearch-query _data_stream | jq -r '.[] |.[].name')
for DATASTREAM in ${DATASTREAMS}
do
# Delete the data stream
echo "Deleting $DATASTREAM..."
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE > /dev/null 2>&1
done
fi
if [ -z "$DONT_STOP_SERVICES" ]; then
#Start Logstash
if [ ! -z "$LS_ENABLED" ]; then
/usr/sbin/so-logstash-start
fi
#Start Elastic Fleet
#if [ ! -z "$EF_ENABLED" ]; then
# /usr/sbin/so-elastic-fleet-start
#fi
#Start Elastalert
if [ ! -z "$EA_ENABLED" ]; then
/usr/sbin/so-elastalert-start
fi
# Start Elastic Agent
/usr/bin/elastic-agent restart
fi


@@ -1,25 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Source common settings
. /usr/sbin/so-common
# Check for log files
for FILE in /opt/so/log/elasticsearch/*.log /opt/so/log/logstash/*.log /opt/so/log/kibana/*.log /opt/so/log/elastalert/*.log /opt/so/log/curator/*.log /opt/so/log/freqserver/*.log /opt/so/log/nginx/*.log; do
# If file exists, then look for errors or warnings
if [ -f $FILE ]; then
MESSAGE=`grep -i 'ERROR\|FAIL\|WARN' $FILE`
if [ ! -z "$MESSAGE" ]; then
header $FILE
echo $MESSAGE | sed 's/WARN/\nWARN/g' | sed 's/WARNING/\nWARNING/g' | sed 's/ERROR/\nERROR/g' | sort | uniq -c | sort -nr
echo
fi
fi
done


@@ -1,31 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
/usr/sbin/so-restart elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-restart kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-restart logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-restart curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-restart elastalert $1
{%- endif %}


@@ -1,31 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
/usr/sbin/so-start elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-start kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-start logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-start curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-start elastalert $1
{%- endif %}


@@ -1,31 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
/usr/sbin/so-stop elasticsearch $1
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
/usr/sbin/so-stop kibana $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-stop logstash $1
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
/usr/sbin/so-stop curator $1
{%- endif %}
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
/usr/sbin/so-stop elastalert $1
{%- endif %}


@@ -1,104 +0,0 @@
#!/usr/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
if [[ $# -lt 1 ]]; then
echo "Usage: $0 --role=<ROLE> --ip=<IP ADDRESS> --apply=<true|false>"
echo ""
echo " Example: so-firewall --role=sensor --ip=192.168.254.100 --apply=true"
echo ""
exit 1
fi
for i in "$@"; do
case $i in
-r=*|--role=*)
ROLE="${i#*=}"
shift
;;
-i=*|--ip=*)
IP="${i#*=}"
shift
;;
-a=*|--apply=*)
APPLY="${i#*=}"
shift
;;
-*|--*)
echo "Unknown option $i"
exit 1
;;
*)
;;
esac
done
ROLE=${ROLE,,}
APPLY=${APPLY,,}
function rolecall() {
THEROLE=$1
THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval fleet heavynodes idh manager managersearch receivers searchnodes sensors standalone strelka_frontend syslog"
for AROLE in $THEROLES; do
if [ "$AROLE" = "$THEROLE" ]; then
return 0
fi
done
return 1
}
# Make sure the required options are specified
if [ -z "$ROLE" ]; then
echo "Please specify a role with --role="
exit 1
fi
if [ -z "$IP" ]; then
echo "Please specify an IP address with --ip="
exit 1
fi
# Are we dealing with a role that this script supports?
if rolecall "$ROLE"; then
echo "$ROLE is a supported role"
else
echo "This is not a supported role"
exit 1
fi
# Are we dealing with an IP?
if verify_ip4 "$IP"; then
echo "$IP is a valid IP or CIDR"
else
echo "$IP is not a valid IP or CIDR"
exit 1
fi
local_salt_dir=/opt/so/saltstack/local/salt/firewall
# Let's see if the file exists and if it does, let's see if the IP exists.
if [ -f "$local_salt_dir/hostgroups/$ROLE" ]; then
if grep -q $IP "$local_salt_dir/hostgroups/$ROLE"; then
echo "Host already exists"
exit 0
fi
fi
# If you have reached this part of your quest then let's add the IP
echo "Adding $IP to the $ROLE role"
echo "$IP" >> $local_salt_dir/hostgroups/$ROLE
# Check to see if we are applying this right away.
if [ "$APPLY" = "true" ]; then
echo "Applying the firewall rules"
salt-call state.apply firewall queue=True
echo "Firewall rules have been applied... Review logs further if there were errors."
echo ""
else
echo "Firewall rules will be applied next salt run"
fi


@@ -1,82 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
if [[ $# -lt 1 ]]; then
echo "Usage: $0 --role=<ROLE> --ip=<IP ADDRESS>"
echo ""
echo " Example: so-firewall-minion --role=manager --ip=192.168.254.100"
echo ""
exit 1
fi
for i in "$@"; do
case $i in
-r=*|--role=*)
ROLE="${i#*=}"
shift
;;
-i=*|--ip=*)
IP="${i#*=}"
shift
;;
-*|--*)
echo "Unknown option $i"
exit 1
;;
*)
;;
esac
done
ROLE=${ROLE^^}
if [ -z "$ROLE" ]; then
echo "Please specify a role with --role="
exit 1
fi
if [ -z "$IP" ]; then
echo "Please specify an IP address with --ip="
exit 1
fi
case "$ROLE" in
'MANAGER')
so-firewall --role=manager --ip="$IP"
;;
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
so-firewall --role=manager --ip="$IP"
so-firewall --role=sensors --ip="$IP"
so-firewall --apply=true --role=searchnodes --ip="$IP"
;;
'FLEET' | 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
case "$ROLE" in
'FLEET')
so-firewall --apply=true --role=fleet --ip="$IP"
;;
'SENSOR')
so-firewall --apply=true --role=sensors --ip="$IP"
;;
'SEARCHNODE')
so-firewall --apply=true --role=searchnodes --ip="$IP"
;;
'HEAVYNODE')
so-firewall --role=sensors --ip="$IP"
so-firewall --apply=true --role=heavynodes --ip="$IP"
;;
'IDH')
so-firewall --apply=true --role=sensors --ip="$IP"
;;
'RECEIVER')
so-firewall --apply=true --role=receivers --ip="$IP"
;;
esac
;;
esac


@@ -1,10 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
curl -K /opt/so/conf/elasticsearch/curl.config -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"


@@ -1,364 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [ -f /usr/sbin/so-common ]; then
. /usr/sbin/so-common
fi
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
if [[ $# -lt 1 ]]; then
echo "Usage: $0 -o=<operation> -m=[id]"
echo ""
echo " where <operation> is one of the following:"
echo ""
echo " list: Lists all keys with hashes"
echo " accept: Accepts a new key and adds the minion files"
echo " delete: Removes the key and deletes the minion files"
echo " reject: Rejects a key"
echo " test: Perform minion test"
echo ""
exit 1
fi
for i in "$@"; do
case $i in
-o=*|--operation=*)
OPERATION="${i#*=}"
shift
;;
-m=*|--minionid=*)
MINION_ID="${i#*=}"
shift
;;
-e=*|--esheap=*)
ES_HEAP_SIZE="${i#*=}"
shift
;;
-n=*|--mgmtnic=*)
MNIC="${i#*=}"
shift
;;
-d=*|--description=*)
NODE_DESCRIPTION="${i#*=}"
shift
;;
-a=*|--monitor=*)
INTERFACE="${i#*=}"
shift
;;
-i=*|--ip=*)
MAINIP="${i#*=}"
shift
;;
-*|--*)
echo "Unknown option $i"
exit 1
;;
*)
;;
esac
done
PILLARFILE=/opt/so/saltstack/local/pillar/minions/$MINION_ID.sls
ADVPILLARFILE=/opt/so/saltstack/local/pillar/minions/adv_$MINION_ID.sls
function getinstallinfo() {
# Pull from file
INSTALLVARS=$(sudo salt "$MINION_ID" cp.get_file_str /opt/so/install.txt --out=newline_values_only)
source <(echo $INSTALLVARS)
}
function testminion() {
# Always run on the host, since this is going to be the manager of a distributed grid, or an eval/standalone.
# Distributed managers must run this in order for the sensor nodes to have access to the so-tcpreplay image.
so-test
result=$?
# If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well
local_id=$(lookup_grain id)
if [[ ! "$local_id" =~ "${MINION_ID}_" ]]; then
salt "$MINION_ID" cmd.run 'so-test'
result=$?
fi
exit $result
}
function listminions() {
salt-key list -F --out=json
exit $?
}
function rejectminion() {
salt-key -y -r $MINION_ID
exit $?
}
function acceptminion() {
salt-key -y -a $MINION_ID
}
function deleteminion() {
salt-key -y -d $MINION_ID
}
function deleteminionfiles () {
rm -f $PILLARFILE
rm -f $ADVPILLARFILE
}
# Create the minion file
function create_minion_files() {
mkdir -p /opt/so/saltstack/local/pillar/minions
touch $ADVPILLARFILE
if [ -f "$PILLARFILE" ]; then
rm $PILLARFILE
fi
}
# Add Elastic settings to the minion file
function add_elastic_to_minion() {
printf '%s\n'\
"elasticsearch:"\
" esheap: '$ES_HEAP_SIZE'"\
" " >> $PILLARFILE
}
# Add Elastic Fleet Server settings to the minion file
function add_fleet_to_minion() {
# Create ES Token for Fleet server (Curl to Kibana API)
# TODO: Add error handling
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
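# The service token returned by Kibana's Fleet API is written into the minion pillar below
# so the new Fleet server node can authenticate to Elasticsearch when it comes online.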
# Write out settings to minion file
printf '%s\n'\
"elasticfleet:"\
" server:"\
" es_token: '$ESTOKEN'"\
" " >> $PILLARFILE
}
# Add IDH Services info to the minion file
function add_idh_to_minion() {
printf '%s\n'\
"idh:"\
" restrict_management_ip: $IDH_MGTRESTRICT"\
" services:" >> "$PILLARFILE"
IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
for service in ${IDH_SERVICES_ARRAY[@]}; do
echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
done
}
function add_logstash_to_minion() {
# Create the logstash advanced pillar
printf '%s\n'\
"logstash:"\
" config:"\
" pipeline_x_workers: $CPUCORES"\
" settings:"\
" lsheap: $LSHEAP"\
" " >> $PILLARFILE
}
# Analyst Workstation
function add_analyst_to_minion() {
printf '%s\n'\
"host:"\
" mainint: '$MNIC'"\
"workstation:"\
" gui:"\
" enabled: true"\
"sensoroni:"\
" node_description: '${NODE_DESCRIPTION//\'/''}'" >> $PILLARFILE
}
# Add basic host info to the minion file
function add_host_to_minion() {
printf '%s\n'\
"host:"\
" mainip: '$MAINIP'"\
" mainint: '$MNIC'" >> $PILLARFILE
}
# Add sensoroni-specific information - Can we pull node_address from the host pillar?
function add_sensoroni_to_minion() {
printf '%s\n'\
"sensoroni:"\
" node_description: '${NODE_DESCRIPTION//\'/''}'"\
" " >> $PILLARFILE
}
# Sensor settings for the minion pillar
function add_sensor_to_minion() {
echo "sensor:" >> $PILLARFILE
echo " interface: '$INTERFACE'" >> $PILLARFILE
echo " mtu: 9000" >> $PILLARFILE
echo "zeek:" >> $PILLARFILE
echo " config:" >> $PILLARFILE
echo " node:" >> $PILLARFILE
echo " lb_procs: '$CORECOUNT'" >> $PILLARFILE
echo "suricata:" >> $PILLARFILE
echo " config:" >> $PILLARFILE
echo " af-packet:" >> $PILLARFILE
echo " threads: '$CORECOUNT'" >> $PILLARFILE
echo "pcap:" >> $PILLARFILE
echo " enabled: True" >> $PILLARFILE
}
function create_fleet_policy() {
JSON_STRING=$( jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":true}'
)
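# Illustrative result for a hypothetical node named "fleet01":
# {"name":"FleetServer_fleet01","id":"FleetServer_fleet01","description":"Fleet Server - fleet01",
#  "namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":true}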
# Create Fleet Sever Policy
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
JSON_STRING_UPDATE=$( jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"data_output_id":"so-manager_elasticsearch"}'
)
# Update Fleet Policy - ES Output
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_$LSHOSTNAME" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING_UPDATE"
}
function update_fleet_host_urls() {
# Query for current Fleet Host URLs & append New Fleet Node Hostname & IP
JSON_STRING=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts' | jq --arg HOSTNAME "https://$LSHOSTNAME:8220" --arg IP "https://$MAINIP:8220" -r '.items[].host_urls += [ $HOSTNAME, $IP ] | {"name":"Default","host_urls": .items[].host_urls,"is_default":true,"proxy_id":null}')
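# The jq filter appends the new node's HTTPS URL (port 8220) and IP to the existing
# host_urls list, then rebuilds the default fleet server host entry from the combined list.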
# Update Fleet Host URLs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/fleet_server_hosts/fleet-default-fleet-server-host" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function update_logstash_outputs() {
# Query for current Logstash outputs & append New Fleet Node Hostname & IP
JSON_STRING=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' | jq --arg HOSTNAME "$LSHOSTNAME:5055" --arg IP "$MAINIP:5055" -r '.item.hosts += [ $HOSTNAME, $IP ] | {"name":"grid-logstash","type":"logstash","hosts": .item.hosts,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function updateMine() {
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
}
function apply_ES_state() {
salt-call state.apply elasticsearch concurrent=True
}
function createEVAL() {
add_elastic_to_minion
add_logstash_to_minion
add_sensor_to_minion
}
function createFLEET() {
add_fleet_to_minion
add_logstash_to_minion
create_fleet_policy
update_fleet_host_urls
update_logstash_outputs
}
function createIDH() {
add_idh_to_minion
}
function createIMPORT() {
add_elastic_to_minion
add_logstash_to_minion
add_sensor_to_minion
}
function createHEAVYNODE() {
add_elastic_to_minion
add_logstash_to_minion
add_sensor_to_minion
}
function createMANAGER() {
add_elastic_to_minion
add_logstash_to_minion
}
function createMANAGERSEARCH() {
add_elastic_to_minion
add_logstash_to_minion
}
function createSENSOR() {
add_sensor_to_minion
}
function createSEARCHNODE() {
add_elastic_to_minion
add_logstash_to_minion
updateMine
apply_ES_state
}
function createSTANDALONE() {
add_elastic_to_minion
add_logstash_to_minion
add_sensor_to_minion
}
function testConnection() {
retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
echo "The Minion has been accepted but is not online. Try again later"
echo "Deleting the key"
deleteminion
exit 1
fi
}
if [[ "$OPERATION" = 'list' ]]; then
listminions
fi
if [[ "$OPERATION" = 'delete' ]]; then
deleteminionfiles
deleteminion
fi
if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
# Skip these steps when running during setup
if [ "$OPERATION" != 'setup' ]; then
# Accept the salt key
acceptminion
# Test to see if the minion was accepted
testConnection
# Pull the info from the file to build what is needed
getinstallinfo
fi
# Check to see if nodetype is set
if [ -z "$NODETYPE" ]; then
echo "No node type specified"
exit 1
fi
create_minion_files
add_host_to_minion
add_sensoroni_to_minion
create$NODETYPE
echo "Minion file created for $MINION_ID"
fi
if [[ "$OPERATION" = 'test' ]]; then
testminion
fi


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart nginx $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start nginx $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop nginx $1


@@ -1,18 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [ $# -lt 2 ]; then
echo "Usage: $0 <steno-query> Output-Filename"
exit 1
fi
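# Example with hypothetical values: passing 'host 10.1.2.3 and port 443' and 'case1234'
# runs that stenographer query and writes the matching packets to /nsm/pcapout/case1234.pcap.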
docker exec -t so-sensoroni scripts/stenoquery.sh "$1" -w /nsm/pcapout/$2.pcap
echo ""
echo "If successful, the output was written to: /nsm/pcapout/$2.pcap"


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart steno $1


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start steno $1


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop steno $1


@@ -1,454 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
Local exit codes:
- General error: 1
- Invalid argument: 2
- File error: 3
"""
import sys, os, subprocess, argparse, signal
import copy
import re
import textwrap
import yaml
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
salt_proc: subprocess.CompletedProcess = None
def print_err(string: str):
print(string, file=sys.stderr)
def check_apply(args: dict, prompt: bool = True):
if args.apply:
print('Configuration updated. Applying changes:')
return apply()
else:
if prompt:
message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
answer = input(message)
while answer.lower() not in [ 'y', 'n', '' ]:
answer = input(message)
if answer.lower() in [ 'n', '' ]:
return 0
else:
print('Applying changes:')
return apply()
else:
return 0
def apply():
salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True']
update_cmd = ['so-rule-update']
print('Syncing config files...')
cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
if cmd.returncode == 0:
print('Updating rules...')
return subprocess.run(update_cmd).returncode
else:
return cmd.returncode
def find_minion_pillar() -> str:
    regex = r'^.*_(manager|managersearch|standalone|import|eval)\.sls$'
result = []
for root, _, files in os.walk(minion_pillar_dir):
for f_minion_id in files:
if re.search(regex, f_minion_id):
result.append(os.path.join(root, f_minion_id))
if len(result) == 0:
print_err('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?')
sys.exit(3)
elif len(result) > 1:
res_arr = []
for r in result:
res_arr.append(f'\"{r}\"')
res_str = ', '.join(res_arr)
print_err('(This should not happen, the system is in an error state if you see this message.)\n')
print_err('More than one manager-type pillar exists, minion id\'s listed below:')
print_err(f' {res_str}')
sys.exit(3)
else:
return result[0]
def read_pillar(pillar: str):
try:
with open(pillar, 'r') as f:
loaded_yaml = yaml.safe_load(f.read())
if loaded_yaml is None:
print_err(f'Could not parse {pillar}')
sys.exit(3)
return loaded_yaml
except:
print_err(f'Could not open {pillar}')
sys.exit(3)
def write_pillar(pillar: str, content: dict):
try:
sids = content['idstools']['sids']
if sids['disabled'] is not None:
if len(sids['disabled']) == 0: sids['disabled'] = None
if sids['enabled'] is not None:
if len(sids['enabled']) == 0: sids['enabled'] = None
if sids['modify'] is not None:
if len(sids['modify']) == 0: sids['modify'] = None
with open(pillar, 'w') as f:
return yaml.dump(content, f, default_flow_style=False)
except Exception as e:
print_err(f'Could not open {pillar}')
sys.exit(3)
def check_sid_pattern(sid_pattern: str):
message = f'SID {sid_pattern} is not valid, did you forget the \"re:\" prefix for a regex pattern?'
if sid_pattern.startswith('re:'):
r_string = sid_pattern[3:]
if not valid_regex(r_string):
print_err('Invalid regex pattern.')
return False
else:
return True
else:
sid: int
try:
sid = int(sid_pattern)
except:
print_err(message)
return False
if sid >= 0:
return True
else:
print_err(message)
return False
def valid_regex(pattern: str):
try:
re.compile(pattern)
return True
except re.error:
return False
def sids_key_exists(pillar: dict, key: str):
return key in pillar.get('idstools', {}).get('sids', {})
def rem_from_sids(pillar: dict, key: str, val: str, optional = False):
pillar_dict = copy.deepcopy(pillar)
arr = pillar_dict['idstools']['sids'][key]
if arr is None or val not in arr:
if not optional: print(f'{val} already does not exist in {key}')
else:
pillar_dict['idstools']['sids'][key].remove(val)
return pillar_dict
def add_to_sids(pillar: dict, key: str, val: str, optional = False):
pillar_dict = copy.deepcopy(pillar)
if pillar_dict['idstools']['sids'][key] is None:
pillar_dict['idstools']['sids'][key] = []
if val in pillar_dict['idstools']['sids'][key]:
if not optional: print(f'{val} already exists in {key}')
else:
pillar_dict['idstools']['sids'][key].append(val)
return pillar_dict
def add_rem_disabled(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'disabled'):
pillar_dict['idstools']['sids']['disabled'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'disabled', args.sid_pattern)
if temp_pillar_dict['idstools']['sids']['disabled'] == pillar_dict['idstools']['sids']['disabled']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
if not args.remove:
if sids_key_exists(pillar_dict, 'enabled'):
pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern, optional=True)
modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
if modify is not None:
rem_candidates = []
for action in modify:
if action.startswith(f'{args.sid_pattern} '):
rem_candidates.append(action)
if len(rem_candidates) > 0:
for item in rem_candidates:
print(f' - {item}')
answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
while answer.lower() not in [ 'y', 'n', '' ]:
for item in rem_candidates:
print(f' - {item}')
answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
if answer.lower() in [ 'y', '' ]:
for item in rem_candidates:
modify.remove(item)
pillar_dict['idstools']['sids']['modify'] = modify
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_disabled_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
disabled = pillar_dict.get('idstools', {}).get('sids', {}).get('disabled')
if disabled is None:
print('No rules disabled.')
return 0
else:
print('Disabled rules:')
for rule in disabled:
print(f' - {rule}')
return 0
def add_rem_enabled(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'enabled'):
pillar_dict['idstools']['sids']['enabled'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'enabled', args.sid_pattern)
if temp_pillar_dict['idstools']['sids']['enabled'] == pillar_dict['idstools']['sids']['enabled']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
if not args.remove:
if sids_key_exists(pillar_dict, 'disabled'):
pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_enabled_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
enabled = pillar_dict.get('idstools', {}).get('sids', {}).get('enabled')
if enabled is None:
print('No rules explicitly enabled.')
return 0
else:
print('Enabled rules:')
for rule in enabled:
print(f' - {rule}')
return 0
def add_rem_modify(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
    if not valid_regex(args.search_term):
        print_err('Search term is not a valid regex pattern.')
        return 2
string_val = f'{args.sid_pattern} \"{args.search_term}\" \"{args.replace_term}\"'
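    # Illustrative value for hypothetical inputs sid_pattern=2019401, search_term='\$EXTERNAL_NET',
    # replace_term='\$HOME_NET':  2019401 "\$EXTERNAL_NET" "\$HOME_NET"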
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'modify'):
pillar_dict['idstools']['sids']['modify'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'modify', string_val)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'modify', string_val)
if temp_pillar_dict['idstools']['sids']['modify'] == pillar_dict['idstools']['sids']['modify']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
# TODO: Determine if a rule should be removed from disabled if modified.
if not args.remove:
if sids_key_exists(pillar_dict, 'disabled'):
pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_modified_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
if modify is None:
print('No rules currently modified.')
return 0
else:
print('Modified rules + modifications:')
for rule in modify:
print(f' - {rule}')
return 0
def sigint_handler(*_):
print('Exiting gracefully on Ctrl-C')
if salt_proc is not None: salt_proc.send_signal(signal.SIGINT)
sys.exit(0)
def main():
signal.signal(signal.SIGINT, sigint_handler)
if os.geteuid() != 0:
print_err('You must run this script as root')
sys.exit(1)
apply_help='After updating rule configuration, apply the idstools state.'
main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
subcommand_desc = textwrap.dedent(
"""\
disabled Manage and list disabled rules (add, remove, list)
enabled Manage and list enabled rules (add, remove, list)
modify Manage and list modified rules (add, remove, list)
"""
)
subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
sid_or_regex_help = 'A valid SID (ex: "4321") or regular expression pattern (ex: "re:heartbleed|spectre")'
# Disabled actions
disabled = subparsers.add_parser('disabled')
disabled_sub = disabled.add_subparsers()
disabled_add = disabled_sub.add_parser('add')
disabled_add.set_defaults(func=add_rem_disabled)
disabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
disabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
disabled_rem = disabled_sub.add_parser('remove')
disabled_rem.set_defaults(func=add_rem_disabled, remove=True)
disabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
disabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
disabled_list = disabled_sub.add_parser('list')
disabled_list.set_defaults(func=list_disabled_rules)
# Enabled actions
enabled = subparsers.add_parser('enabled')
enabled_sub = enabled.add_subparsers()
enabled_add = enabled_sub.add_parser('add')
enabled_add.set_defaults(func=add_rem_enabled)
enabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
enabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
enabled_rem = enabled_sub.add_parser('remove')
enabled_rem.set_defaults(func=add_rem_enabled, remove=True)
enabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
enabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
enabled_list = enabled_sub.add_parser('list')
enabled_list.set_defaults(func=list_enabled_rules)
search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")'
replace_term_help='The text to replace the search term with'
# Modify actions
modify = subparsers.add_parser('modify')
modify_sub = modify.add_subparsers()
modify_add = modify_sub.add_parser('add')
modify_add.set_defaults(func=add_rem_modify)
modify_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
modify_add.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
modify_add.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
modify_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
modify_rem = modify_sub.add_parser('remove')
modify_rem.set_defaults(func=add_rem_modify, remove=True)
modify_rem.add_argument('sid_pattern', metavar='SID', help=sid_or_regex_help)
modify_rem.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
modify_rem.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
modify_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
modify_list = modify_sub.add_parser('list')
modify_list.set_defaults(func=list_modified_rules)
# Begin parse + run
args = main_parser.parse_args(sys.argv[1:])
if not hasattr(args, 'remove'):
args.remove = False
args.pillar = find_minion_pillar()
if hasattr(args, 'func'):
exit_code = args.func(args)
else:
if args.command is None:
main_parser.print_help()
else:
if args.command == 'disabled':
disabled.print_help()
elif args.command == 'enabled':
enabled.print_help()
elif args.command == 'modify':
modify.print_help()
sys.exit(0)
sys.exit(exit_code)
if __name__ == '__main__':
main()


@@ -1,10 +0,0 @@
#!/bin/bash
. /usr/sbin/so-common
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
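# Each argument is quoted and passed through unchanged to idstools-rulecat inside the
# so-idstools container; --force updates the rules even when rulecat would otherwise skip it.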
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"


@@ -1,55 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
default_salt_dir=/opt/so/saltstack/default
clone_to_tmp() {
# TODO Need to add an air gap option
# Make a temp location for the files
mkdir /tmp/sogh
cd /tmp/sogh
#git clone -b dev https://github.com/Security-Onion-Solutions/securityonion.git
git clone https://github.com/Security-Onion-Solutions/securityonion.git
cd /tmp
}
copy_new_files() {
# Copy new files over to the salt dir
cd /tmp/sogh/securityonion
git checkout $BRANCH
VERSION=$(cat VERSION)
# We need to overwrite if there is a repo file
if [ -d /opt/so/repo ]; then
tar -czf /opt/so/repo/"$VERSION".tar.gz -C "$(pwd)/.." .
fi
rsync -a salt $default_salt_dir/
rsync -a pillar $default_salt_dir/
chown -R socore:socore $default_salt_dir/salt
chown -R socore:socore $default_salt_dir/pillar
chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
rm -rf /tmp/sogh
}
got_root(){
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
}
got_root
if [ $# -ne 1 ] ; then
BRANCH=master
else
BRANCH=$1
fi
clone_to_tmp
copy_new_files


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart sensoroni $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start sensoroni $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop sensoroni $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart soctopus $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start soctopus $1


@@ -1,12 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop soctopus $1


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart suricata $1


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start suricata $1


@@ -1,12 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop suricata $1


@@ -1,60 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
TESTRULE=$1
TESTPCAP=$2
. /usr/sbin/so-common
if [ $# -lt 2 ]; then
echo "Usage: $0 <CustomRule> <TargetPCAP>"
exit 1
fi
echo ""
echo "==============="
echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
echo ""
sleep 3
rm -rf /tmp/nids-testing/output
mkdir -p /tmp/nids-testing/output
chown suricata:socore /tmp/nids-testing/output
mkdir -p /tmp/nids-testing/rules
cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
cat $TESTRULE >> /tmp/nids-testing/rules/all.rules
echo "==== Begin Suricata Output ==="
docker run --rm \
-v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
-v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
-v /tmp/nids-testing/rules:/etc/suricata/rules:ro \
-v "$TESTPCAP:/input.pcap:ro" \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
-v /tmp/nids-testing/output/:/nsm/:rw \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -v -k none -r /input.pcap -l /nsm --init-errors-fatal
echo "==== End Suricata Output ==="
echo ""
echo "If any alerts hit, they will be displayed below:"
echo ""
cat /tmp/nids-testing/output/* | jq
echo ""
echo "End so-suricata-testrule"
echo "==============="
echo ""


@@ -1,699 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
source $(dirname $0)/so-common
DEFAULT_ROLE=analyst
function usage() {
cat <<USAGE_EOF
Usage: $0 <operation> [supporting parameters]
where <operation> is one of the following:
list: Lists all user email addresses currently defined in the identity system
add: Adds a new user to the identity system
Required parameters:
--email <email>
Optional parameters:
--role <role> (defaults to $DEFAULT_ROLE)
--firstName <firstName> (defaults to blank)
--lastName <lastName> (defaults to blank)
--note <note> (defaults to blank)
--skip-sync (defers the Elastic sync until the next scheduled time)
addrole: Grants a role to an existing user
Required parameters:
--email <email>
--role <role>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
delrole: Removes a role from an existing user
Required parameters:
--email <email>
--role <role>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
password: Updates a user's password and disables MFA
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
profile: Updates a user's profile information
Required parameters:
--email <email>
Optional parameters:
--role <role> (defaults to $DEFAULT_ROLE)
--firstName <firstName> (defaults to blank)
--lastName <lastName> (defaults to blank)
--note <note> (defaults to blank)
enable: Enables a user
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
disable: Disables a user
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
validate: Validates that the given email address and password are acceptable
Required parameters:
--email <email>
valemail: Validates that the given email address is acceptable
Required parameters:
--email <email>
valpass: Validates that a password is acceptable
Note that the password can be piped into STDIN to avoid prompting for it
USAGE_EOF
exit 1
}
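# Illustrative invocations (the email address and password below are placeholders;
# passwords are prompted for, or can be piped via STDIN as shown for 'password'):
#   so-user add --email jane@example.com --role analyst --firstName Jane --lastName Doe
#   so-user addrole --email jane@example.com --role superuser
#   echo 'placeholder-password' | so-user password --email jane@example.com --skip-sync
#   so-user list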
if [[ $# -lt 1 || $1 == --help || $1 == -h || $1 == -? || $1 == --h ]]; then
usage
fi
operation=$1
shift
while [[ $# -gt 0 ]]; do
param=$1
shift
case "$param" in
--email)
email=$1
shift
;;
--role)
role=$1
shift
;;
--firstName)
firstName=$1
shift
;;
--lastName)
lastName=$1
shift
;;
--note)
note=$1
shift
;;
--skip-sync)
SKIP_SYNC=1
;;
*)
echo "Encountered unexpected parameter: $param"
usage
;;
esac
done
kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434/admin}
databasePath=${KRATOS_DB_PATH:-/nsm/kratos/db/db.sqlite}
databaseTimeout=${KRATOS_DB_TIMEOUT:-5000}
bcryptRounds=${BCRYPT_ROUNDS:-12}
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles}
esUID=${ELASTIC_UID:-930}
esGID=${ELASTIC_GID:-930}
soUID=${SOCORE_UID:-939}
soGID=${SOCORE_GID:-939}
function lock() {
# Obtain file descriptor lock
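# (flock -w 10 below waits up to 10 seconds for the lock before giving up)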
exec 99>/var/tmp/so-user.lock || fail "Unable to create lock descriptor; if the system was not shut down gracefully you may need to remove /var/tmp/so-user.lock manually."
flock -w 10 99 || fail "Another process is using so-user; if the system was not shut down gracefully you may need to remove /var/tmp/so-user.lock manually."
trap 'rm -f /var/tmp/so-user.lock' EXIT
}
function fail() {
msg=$1
echo "$1"
exit 1
}
function require() {
cmd=$1
which "$1" 2>&1 > /dev/null
[[ $? != 0 ]] && fail "This script requires the following command be installed: ${cmd}"
}
# Verify this environment is capable of running this script
function verifyEnvironment() {
require "htpasswd"
require "jq"
require "curl"
require "openssl"
require "sqlite3"
[[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
response=$(curl -Ss -L ${kratosUrl}/)
[[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
}
function findIdByEmail() {
email=$1
response=$(curl -Ss -L ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId
}
function validatePassword() {
password=$1
len=${#password}
if [[ $len -lt 8 ]]; then
fail "Password does not meet the minimum requirements"
fi
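# bcrypt only uses the first 72 bytes of a password, hence the upper bound below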
if [[ $len -gt 72 ]]; then
fail "Password is too long (max: 72)"
fi
check_password_and_exit "$password"
}
function validateEmail() {
email=$1
# (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
fail "Email address is invalid"
fi
if [[ "$email" =~ [A-Z] ]]; then
fail "Email addresses cannot contain uppercase letters"
fi
}
function hashPassword() {
password=$1
passwordHash=$(echo "${password}" | htpasswd -niBC $bcryptRounds SOUSER)
passwordHash=$(echo "$passwordHash" | cut -c 11-)
passwordHash="\$2a${passwordHash}" # still waiting for https://github.com/elastic/elasticsearch/issues/51132
echo "$passwordHash"
}
function updatePassword() {
identityId=$1
if [ -z "$password" ]; then
# Read password from stdin (show prompt only if no stdin was piped in)
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs password
validatePassword "$password"
fi
if [[ -n "$identityId" ]]; then
# Generate password hash
passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
# Deactivate MFA
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='totp'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='totp');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}
function createFile() {
filename=$1
uid=$2
gid=$3
mkdir -p $(dirname "$filename")
truncate -s 0 "$filename"
chmod 600 "$filename"
chown "${uid}:${gid}" "$filename"
}
function ensureRoleFileExists() {
if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then
# Generate the new roles file
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
if [[ -f "$databasePath" ]]; then
echo "Migrating roles to new file: $socRolesFile"
echo "select 'superuser:' || id from identities;" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read identities from database"
echo "The following users have all been migrated with the super user role:"
cat "${rolesTmpFile}"
else
echo "Database file does not exist yet, installation is likely not yet complete."
fi
if [[ -d "$socRolesFile" ]]; then
echo "Removing invalid roles directory created by Docker"
rm -fr "$socRolesFile"
fi
mv "${rolesTmpFile}" "${socRolesFile}"
fi
}
function syncElasticSystemUser() {
json=$1
userid=$2
usersFile=$3
user=$(echo "$json" | jq -r ".local.users.$userid.user")
pass=$(echo "$json" | jq -r ".local.users.$userid.pass")
[[ -z "$user" || -z "$pass" ]] && fail "Elastic auth credentials for system user '$userid' are missing"
hash=$(hashPassword "$pass")
echo "${user}:${hash}" >> "$usersFile"
}
function syncElasticSystemRole() {
json=$1
userid=$2
role=$3
rolesFile=$4
user=$(echo "$json" | jq -r ".local.users.$userid.user")
[[ -z "$user" ]] && fail "Elastic auth credentials for system user '$userid' are missing"
echo "${role}:${user}" >> "$rolesFile"
}
function syncElastic() {
[[ -n $SKIP_SYNC ]] && return
echo "Syncing users and roles between SOC and Elastic..."
usersTmpFile="${elasticUsersFile}.tmp"
createFile "${usersTmpFile}" "$esUID" "$esGID"
rolesTmpFile="${elasticRolesFile}.tmp"
createFile "${rolesTmpFile}" "$esUID" "$esGID"
authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")
syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "kibana_system" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"
if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then
# Append the SOC users
userData=$(echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
"from identity_credential_identifiers ici, identity_credentials ic, identities i, identity_credential_types ict " \
"where " \
" ici.identity_credential_id=ic.id " \
" and ic.identity_id=i.id " \
" and ict.id=ic.identity_credential_type_id " \
" and ict.name='password' " \
" and instr(ic.config, 'hashed_password') " \
" and i.state == 'active' " \
"order by ici.identifier;" | \
sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath")
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
echo "${userData}" | \
jq -r '.user + ":" + .data.hashed_password' \
>> "$usersTmpFile"
# Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
userId=$(echo "$rolePair" | cut -d: -f2)
role=$(echo "$rolePair" | cut -d: -f1)
echo "select '$role:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic, identity_credential_types ict " \
"where ici.identity_credential_id=ic.id " \
" and ict.id=ic.identity_credential_type_id " \
" and ict.name='password' " \
" and ic.identity_id = '$userId';" | \
sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" >> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read role identities from database"
done < "$socRolesFile"
else
echo "Database file or soc roles file does not exist yet, skipping users export"
fi
if [[ -s "${usersTmpFile}" ]]; then
mv "${usersTmpFile}" "${elasticUsersFile}"
mv "${rolesTmpFile}" "${elasticRolesFile}"
if [[ -z "$SKIP_STATE_APPLY" ]]; then
echo "Elastic state will be re-applied to affected minions. This will run in the background and may take several minutes to complete."
echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1
salt --async -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
fi
else
echo "Newly generated users/roles files are incomplete; aborting."
fi
}
function syncAll() {
ensureRoleFileExists
# Check if a sync is needed. Sync is not needed if the following are true:
# - user database entries are all older than the elastic users file
# - soc roles file last modify date is older than the elastic roles file
if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then
usersFileAgeSecs=$(($(date +%s) - $(date +%s -r "$elasticUsersFile")))
staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \
| sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath")
[[ $? != 0 ]] && fail "Unable to read user count from database"
if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then
return 1
fi
fi
syncElastic
return 0
}
function listUsers() {
response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort)
for user in $users; do
roles=$(grep ":$user\$" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ')
echo "$user: $roles"
done
}
function addUserRole() {
email=$1
role=$2
adjustUserRole "$email" "$role" "add"
}
function deleteUserRole() {
email=$1
role=$2
adjustUserRole "$email" "$role" "del"
}
function adjustUserRole() {
email=$1
role=$2
op=$3
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
ensureRoleFileExists
filename="$socRolesFile"
hasRole=0
grep "^$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1
if [[ "$op" == "add" ]]; then
if [[ "$hasRole" == "1" ]]; then
echo "User '$email' already has the role: $role"
return 1
else
echo "$role:$identityId" >> "$filename"
fi
elif [[ "$op" == "del" ]]; then
if [[ "$hasRole" -ne 1 ]]; then
fail "User '$email' does not have the role: $role"
else
sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp"
cat "$filename".tmp > "$filename"
rm -f "$filename".tmp
fi
else
fail "Unsupported role adjustment operation: $op"
fi
return 0
}
function createUser() {
email=$1
role=$2
firstName=$3
lastName=$4
note=$5
now=$(date -u +%FT%TZ)
addUserJson=$(cat <<EOF
{
"traits": {
"email": "${email}",
"firstName": "${firstName}",
"lastName": "${lastName}",
"note": "${note}"
},
"schema_id": "default"
}
EOF
)
response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
identityId=$(echo "${response}" | jq -r ".id")
if [[ "${identityId}" == "null" ]]; then
code=$(echo "${response}" | jq ".error.code")
[[ "${code}" == "409" ]] && fail "User already exists"
reason=$(echo "${response}" | jq ".error.message")
[[ $? == 0 ]] && fail "Unable to add user: ${reason}"
else
updatePassword "$identityId"
addUserRole "$email" "$role"
fi
}
function updateStatus() {
email=$1
status=$2
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
schemaId=$(echo "$response" | jq -r .schema_id)
# Capture traits and remove the obsolete 'status' trait if it exists
traitBlock=$(echo "$response" | jq -c .traits | sed -re 's/,?"status":".*?"//')
state="active"
if [[ "$status" == "locked" ]]; then
state="inactive"
fi
body="{ \"schema_id\": \"$schemaId\", \"state\": \"$state\", \"traits\": $traitBlock }"
response=$(curl -fSsL -XPUT -H "Content-Type: application/json" "${kratosUrl}/identities/$identityId" -d "$body")
[[ $? != 0 ]] && fail "Unable to update user"
}
function updateUserPassword() {
email=$1
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
updatePassword "$identityId"
}
function updateUserProfile() {
email=$1
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
schemaId=$(echo "$response" | jq -r .schema_id)
state=$(echo "$response" | jq -r .state)
traitBlock="{\"email\":\"$email\",\"firstName\":\"$firstName\",\"lastName\":\"$lastName\",\"note\":\"$note\"}"
body="{ \"schema_id\": \"$schemaId\", \"state\": \"$state\", \"traits\": $traitBlock }"
response=$(curl -fSsL -XPUT -H "Content-Type: application/json" "${kratosUrl}/identities/$identityId" -d "$body")
[[ $? != 0 ]] && fail "Unable to update user"
}
function deleteUser() {
email=$1
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
grep -v "$identityId" "$socRolesFile" > "$rolesTmpFile"
cat "$rolesTmpFile" > "$socRolesFile"
}
case "${operation}" in
"add")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
validateEmail "$email"
updatePassword
createUser "$email" "${role:-$DEFAULT_ROLE}" "${firstName}" "${lastName}" "${note}"
syncAll
echo "Successfully added new user to SOC"
echo "$password" | so-influxdb-manage useradd "$email"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userpromote "$email"
fi
;;
"list")
verifyEnvironment
listUsers
;;
"addrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"
lock
validateEmail "$email"
if addUserRole "$email" "$role"; then
syncElastic
echo "Successfully added role to user"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userpromote "$email"
fi
fi
;;
"delrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"
lock
validateEmail "$email"
deleteUserRole "$email" "$role"
syncElastic
echo "Successfully removed role from user"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userdemote "$email"
fi
;;
"password")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
updateUserPassword "$email"
syncAll
echo "Successfully updated user password"
echo "$password" | so-influxdb-manage userpass "$email"
;;
"profile")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
updateUserProfile "$email"
echo "Successfully updated user profile"
;;
"enable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
updateStatus "$email" 'active'
syncAll
echo "Successfully enabled user"
so-influxdb-manage userenable "$email"
;;
"disable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
updateStatus "$email" 'locked'
syncAll
echo "Successfully disabled user"
so-influxdb-manage userdisable "$email"
;;
"delete")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
lock
deleteUser "$email"
syncAll
echo "Successfully deleted user"
so-influxdb-manage userdel "$email"
;;
"sync")
lock
syncAll
;;
"validate")
validateEmail "$email"
updatePassword
echo "Email and password are acceptable"
;;
"valemail")
validateEmail "$email"
echo "Email is acceptable"
;;
"valpass")
updatePassword
echo "Password is acceptable"
;;
*)
fail "Unsupported operation: $operation"
usage
;;
esac
exit 0

View File

@@ -1,2 +0,0 @@
#!/bin/bash
so-user add --email $1

View File

@@ -1,2 +0,0 @@
#!/bin/bash
so-user disable --email $1

View File

@@ -1,2 +0,0 @@
#!/bin/bash
so-user enable --email $1

View File

@@ -1,2 +0,0 @@
#!/bin/bash
so-user list

File diff suppressed because it is too large