merge with dev

This commit is contained in:
m0duspwnens
2021-11-09 13:07:35 -05:00
67 changed files with 4353 additions and 453 deletions

View File

@@ -15,7 +15,7 @@
### Contributing code
* **All commits must be signed** with a valid key that has been added to your GitHub account. The commits should have all the "**Verified**" tag when viewed on GitHub as shown below:
* **All commits must be signed** with a valid key that has been added to your GitHub account. Each commit should have the "**Verified**" tag when viewed on GitHub as shown below:
<img src="./assets/images/verified-commit-1.png" width="450">

View File

@@ -16,6 +16,10 @@ firewall:
ips:
delete:
insert:
endgame:
ips:
delete:
insert:
fleet:
ips:
delete:

View File

@@ -24,8 +24,9 @@ pki_private_key:
- x509: /etc/pki/ca.crt
{%- endif %}
/etc/pki/ca.crt:
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ manager }}
- C: US
@@ -66,4 +67,4 @@ cakeyperms:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -9,6 +9,11 @@ rmvariablesfile:
file.absent:
- name: /tmp/variables.txt
dockergroup:
group.present:
- name: docker
- gid: 920
# Add socore Group
socoregroup:
group.present:
@@ -101,6 +106,7 @@ commonpkgs:
- python3-m2crypto
- python3-mysqldb
- python3-packaging
- python3-lxml
- git
- vim
@@ -143,6 +149,7 @@ commonpkgs:
- python36-m2crypto
- python36-mysql
- python36-packaging
- python36-lxml
- yum-utils
- device-mapper-persistent-data
- lvm2

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
@@ -15,152 +15,199 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
import ipaddress
import textwrap
import os
import subprocess
import sys
import argparse
import re
from lxml import etree as ET
from xml.dom import minidom
from datetime import datetime as dt
from datetime import timezone as tz
local_salt_dir=/opt/so/saltstack/local
SKIP=0
function usage {
cat << EOF
Usage: $0 [-abefhoprsw] [ -i IP ]
This program allows you to add a firewall rule to allow connections from a new IP address or CIDR range.
If you run this program with no arguments, it will present a menu for you to choose your options.
If you want to automate and skip the menu, you can pass the desired options as command line arguments.
EXAMPLES
To add 10.1.2.3 to the analyst role:
so-allow -a -i 10.1.2.3
To add 10.1.2.0/24 to the osquery role:
so-allow -o -i 10.1.2.0/24
EOF
# Root of the local Salt configuration tree; pillar files live under here.
LOCAL_SALT_DIR='/opt/so/saltstack/local'
# Wazuh (OSSEC) manager configuration; whitelist entries are written here.
WAZUH_CONF='/nsm/wazuh/etc/ossec.conf'
# Maps each one-letter menu/CLI option to the firewall role name passed to
# so-firewall and the human-readable description shown in the menu/help.
VALID_ROLES = {
    'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
    'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
    'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
    'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
    'o': { 'role': 'osquery_endpoint', 'desc': 'Osquery endpoint - 8090/tcp' },
    's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
    'w': { 'role': 'wazuh_agent', 'desc': 'Wazuh agent - 1514/tcp/udp' },
    'p': { 'role': 'wazuh_api', 'desc': 'Wazuh API - 55000/tcp' },
    'r': { 'role': 'wazuh_authd', 'desc': 'Wazuh registration service - 1515/tcp' }
}
while getopts "ahfesprbowi:" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
a)
FULLROLE="analyst"
SKIP=1
;;
b)
FULLROLE="beats_endpoint"
SKIP=1
;;
e)
FULLROLE="elasticsearch_rest"
SKIP=1
;;
f)
FULLROLE="strelka_frontend"
SKIP=1
;;
i) IP=$OPTARG
;;
o)
FULLROLE="osquery_endpoint"
SKIP=1
;;
w)
FULLROLE="wazuh_agent"
SKIP=1
;;
s)
FULLROLE="syslog"
SKIP=1
;;
p)
FULLROLE="wazuh_api"
SKIP=1
;;
r)
FULLROLE="wazuh_authd"
SKIP=1
;;
*)
usage
exit 0
;;
esac
done
if [ "$SKIP" -eq 0 ]; then
def validate_ip_cidr(ip_cidr: str) -> bool:
    """Return True when *ip_cidr* parses as a single IP address or a CIDR
    network (host bits must be clear, e.g. 10.1.2.0/24, not 10.1.2.1/24)."""
    for parse in (ipaddress.ip_address, ipaddress.ip_network):
        try:
            parse(ip_cidr)
        except ValueError:
            continue
        return True
    return False
echo "This program allows you to add a firewall rule to allow connections from a new IP address."
echo ""
echo "Choose the role for the IP or Range you would like to add"
echo ""
echo "[a] - Analyst - ports 80/tcp and 443/tcp"
echo "[b] - Logstash Beat - port 5044/tcp"
echo "[e] - Elasticsearch REST API - port 9200/tcp"
echo "[f] - Strelka frontend - port 57314/tcp"
echo "[o] - Osquery endpoint - port 8090/tcp"
echo "[s] - Syslog device - 514/tcp/udp"
echo "[w] - Wazuh agent - port 1514/tcp/udp"
echo "[p] - Wazuh API - port 55000/tcp"
echo "[r] - Wazuh registration service - 1515/tcp"
echo ""
echo "Please enter your selection:"
read -r ROLE
echo "Enter a single ip address or range to allow (example: 10.10.10.10 or 10.10.0.0/16):"
read -r IP
if [ "$ROLE" == "a" ]; then
FULLROLE=analyst
elif [ "$ROLE" == "b" ]; then
FULLROLE=beats_endpoint
elif [ "$ROLE" == "e" ]; then
FULLROLE=elasticsearch_rest
elif [ "$ROLE" == "f" ]; then
FULLROLE=strelka_frontend
elif [ "$ROLE" == "o" ]; then
FULLROLE=osquery_endpoint
elif [ "$ROLE" == "w" ]; then
FULLROLE=wazuh_agent
elif [ "$ROLE" == "s" ]; then
FULLROLE=syslog
elif [ "$ROLE" == "p" ]; then
FULLROLE=wazuh_api
elif [ "$ROLE" == "r" ]; then
FULLROLE=wazuh_authd
else
echo "I don't recognize that role"
exit 1
fi
def role_prompt() -> str:
    """Interactively list the available roles and return the firewall role
    name for the user's selection; exits with status 1 on an unknown key."""
    print()
    print('Choose the role for the IP or Range you would like to allow')
    print()
    for key, info in VALID_ROLES.items():
        print(f'[{key}] - {info["desc"]}')
    print()
    role = input('Please enter your selection: ')
    if role not in VALID_ROLES:
        print(f'Invalid role \'{role}\', please try again.', file=sys.stderr)
        sys.exit(1)
    return VALID_ROLES[role]['role']
fi
def ip_prompt() -> str:
    """Prompt for an IP address or CIDR block and return it once validated;
    exits with status 1 on invalid input."""
    ip = input('Enter a single ip address or range to allow (ex: 10.10.10.10 or 10.10.0.0/16): ')
    if not validate_ip_cidr(ip):
        print(f'Invalid IP address or CIDR block \'{ip}\', please try again.', file=sys.stderr)
        sys.exit(1)
    return ip
echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
/usr/sbin/so-firewall includehost $FULLROLE $IP
salt-call state.apply firewall queue=True
# Check if Wazuh enabled
if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
# If analyst, add to Wazuh AR whitelist
if [ "$FULLROLE" == "analyst" ]; then
WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf"
if ! grep -q "<white_list>$IP</white_list>" $WAZUH_MGR_CFG ; then
DATE=$(date)
sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG
sed -i '/^$/N;/^\n$/D' $WAZUH_MGR_CFG
echo -e "<!--Address $IP added by /usr/sbin/so-allow on \"$DATE\"-->\n <global>\n <white_list>$IP</white_list>\n </global>\n</ossec_config>" >> $WAZUH_MGR_CFG
echo "Added whitelist entry for $IP in $WAZUH_MGR_CFG."
echo
echo "Restarting OSSEC Server..."
/usr/sbin/so-wazuh-restart
fi
fi
fi
def wazuh_enabled() -> bool:
    """Return True if any pillar file under LOCAL_SALT_DIR/pillar enables
    Wazuh ('wazuh: 1').

    BUG FIX: the original passed the bare filename from os.listdir() to
    open(), which resolves against the CWD and raises FileNotFoundError;
    it also ignored subdirectories, while the bash version this replaced
    used `grep -q -R`. Walk the tree and open files by full path instead.
    """
    pillar_dir = os.path.join(LOCAL_SALT_DIR, 'pillar')
    for dirpath, _dirnames, filenames in os.walk(pillar_dir):
        for name in filenames:
            with open(os.path.join(dirpath, name), 'r') as pillar:
                if 'wazuh: 1' in pillar.read():
                    return True
    return False
def root_to_str(root: ET.ElementTree) -> str:
    """Serialize *root* to a pretty-printed XML string.

    Flattens lxml's output onto one line and strips inter-tag whitespace so
    minidom can re-indent uniformly without emitting stray text nodes.
    """
    xml_str = ET.tostring(root, encoding='unicode', method='xml').replace('\n', '')
    # Remove any run of spaces that immediately follows a closing '>'.
    xml_str = re.sub(r'(?:(?<=>) *)', '', xml_str)
    # NOTE(review): these two substitutions appear to re-normalize spacing
    # around comment terminators; the exact whitespace in these patterns may
    # have been collapsed in transit — confirm against the upstream source.
    xml_str = re.sub(r' -', '', xml_str)
    xml_str = re.sub(r' -->', ' -->', xml_str)
    dom = minidom.parseString(xml_str)
    return dom.toprettyxml(indent=" ")
def add_wl(ip):
    """Append a <global><white_list>ip</white_list></global> entry (preceded
    by an audit comment) to the Wazuh ossec.conf and rewrite the file."""
    # remove_blank_text lets root_to_str() re-indent cleanly afterwards.
    parser = ET.XMLParser(remove_blank_text=True)
    with open(WAZUH_CONF, 'rb') as wazuh_conf:
        tree = ET.parse(wazuh_conf, parser)
        root = tree.getroot()
    # Audit trail comment mirroring the old bash implementation's format.
    source_comment = ET.Comment(f'Address {ip} added by /usr/sbin/so-allow on {dt.utcnow().replace(tzinfo=tz.utc).strftime("%a %b %e %H:%M:%S %Z %Y")}')
    new_global = ET.Element("global")
    new_wl = ET.SubElement(new_global, 'white_list')
    new_wl.text = ip
    root.append(source_comment)
    root.append(new_global)
    # Rewrite the whole config in place; no backup is taken.
    with open(WAZUH_CONF, 'w') as add_out:
        add_out.write(root_to_str(root))
def apply(role: str, ip: str) -> int:
    """Allow *ip* for *role*: register it with so-firewall, apply the
    firewall Salt state, and (for the analyst role with Wazuh enabled)
    whitelist it in ossec.conf and restart the OSSEC server.

    Returns a shell-style exit code (0 on success).
    """
    firewall_cmd = ['so-firewall', 'includehost', role, ip]
    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'firewall', 'queue=True']
    restart_wazuh_cmd = ['so-wazuh-restart']

    print(f'Adding {ip} to the {role} role. This can take a few seconds...')
    cmd = subprocess.run(firewall_cmd)
    if cmd.returncode != 0:
        return cmd.returncode
    cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
    if cmd.returncode != 0:
        # BUG FIX: typo 'Commmand' corrected.
        print(f'Command \'{" ".join(salt_cmd)}\' failed.', file=sys.stderr)
        return cmd.returncode
    # BUG FIX: the original tested `wazuh_enabled` (the function object,
    # always truthy) instead of calling it, so the whitelist step ran for
    # analyst even when Wazuh was disabled.
    if not (wazuh_enabled() and role == 'analyst'):
        return cmd.returncode
    try:
        add_wl(ip)
        # BUG FIX: message said 'from'; the entry is added *in* the config
        # (matches the bash predecessor's wording).
        print(f'Added whitelist entry for {ip} in {WAZUH_CONF}', file=sys.stderr)
    except Exception as e:
        print(f'Failed to add whitelist entry for {ip} in {WAZUH_CONF}', file=sys.stderr)
        print(e)
        return 1
    print('Restarting OSSEC Server...')
    cmd = subprocess.run(restart_wazuh_cmd)
    if cmd.returncode != 0:
        print('Failed to restart OSSEC server.')
    return cmd.returncode
def main():
    """Entry point: require root, parse role/IP options (or fall back to
    interactive prompts when no role flag is given), then apply each role."""
    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)
    main_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''\
            additional information:
              To use this script in interactive mode call it with no arguments
            '''))
    group = main_parser.add_argument_group(title='roles')
    group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
    group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
    group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
    group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
    group.add_argument('-o', dest='roles', action='append_const', const=VALID_ROLES['o']['role'], help="Osquery endpoint - 8090/tcp")
    group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")
    group.add_argument('-w', dest='roles', action='append_const', const=VALID_ROLES['w']['role'], help="Wazuh agent - 1514/tcp/udp")
    group.add_argument('-p', dest='roles', action='append_const', const=VALID_ROLES['p']['role'], help="Wazuh API - 55000/tcp")
    group.add_argument('-r', dest='roles', action='append_const', const=VALID_ROLES['r']['role'], help="Wazuh registration service - 1515/tcp")
    ip_g = main_parser.add_argument_group(title='allow')
    ip_g.add_argument('-i', help="IP or CIDR block to disallow connections from, requires at least one role argument", metavar='', dest='ip')
    args = main_parser.parse_args(sys.argv[1:])

    if args.roles is None:
        # Interactive mode: prompt for both role and IP.
        role = role_prompt()
        ip = ip_prompt()
        try:
            return_code = apply(role, ip)
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            # BUG FIX: arbitrary exceptions need not define .errno; the
            # original raised AttributeError here (or exited with None).
            return_code = getattr(e, 'errno', 1) or 1
        sys.exit(return_code)

    if args.ip is None:
        # Allow the IP to come from the environment for legacy automation.
        if os.environ.get('IP') is None:
            main_parser.print_help()
            sys.exit(1)
        args.ip = os.environ['IP']

    if validate_ip_cidr(args.ip):
        try:
            for role in args.roles:
                return_code = apply(role, args.ip)
                if return_code > 0:
                    break
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            # BUG FIX: see above — do not assume .errno exists.
            return_code = getattr(e, 'errno', 1) or 1
    else:
        print(f'Invalid IP address or CIDR block \'{args.ip}\', please try again.', file=sys.stderr)
        return_code = 1
    sys.exit(return_code)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit non-zero on Ctrl-C without printing a traceback.
        sys.exit(1)

213
salt/common/tools/sbin/so-deny Executable file
View File

@@ -0,0 +1,213 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ipaddress
import textwrap
import os
import subprocess
import sys
import argparse
import re
from lxml import etree as ET
from xml.dom import minidom
# Root of the local Salt configuration tree; pillar files live under here.
LOCAL_SALT_DIR='/opt/so/saltstack/local'
# Wazuh (OSSEC) manager configuration; whitelist entries are removed here.
WAZUH_CONF='/nsm/wazuh/etc/ossec.conf'
# Maps each one-letter menu/CLI option to the firewall role name passed to
# so-firewall and the human-readable description shown in the menu/help.
VALID_ROLES = {
    'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
    'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
    'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
    'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
    'o': { 'role': 'osquery_endpoint', 'desc': 'Osquery endpoint - 8090/tcp' },
    's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
    'w': { 'role': 'wazuh_agent', 'desc': 'Wazuh agent - 1514/tcp/udp' },
    'p': { 'role': 'wazuh_api', 'desc': 'Wazuh API - 55000/tcp' },
    'r': { 'role': 'wazuh_authd', 'desc': 'Wazuh registration service - 1515/tcp' }
}
def validate_ip_cidr(ip_cidr: str) -> bool:
    """Return True when *ip_cidr* parses as a single IP address or a CIDR
    network (host bits must be clear, e.g. 10.1.2.0/24, not 10.1.2.1/24)."""
    for parse in (ipaddress.ip_address, ipaddress.ip_network):
        try:
            parse(ip_cidr)
        except ValueError:
            continue
        return True
    return False
def role_prompt() -> str:
    """Interactively list the available roles and return the firewall role
    name for the user's selection; exits with status 1 on an unknown key."""
    print()
    print('Choose the role for the IP or Range you would like to deny')
    print()
    for key, info in VALID_ROLES.items():
        print(f'[{key}] - {info["desc"]}')
    print()
    role = input('Please enter your selection: ')
    if role not in VALID_ROLES:
        print(f'Invalid role \'{role}\', please try again.', file=sys.stderr)
        sys.exit(1)
    return VALID_ROLES[role]['role']
def ip_prompt() -> str:
    """Prompt for an IP address or CIDR block and return it once validated;
    exits with status 1 on invalid input."""
    ip = input('Enter a single ip address or range to deny (ex: 10.10.10.10 or 10.10.0.0/16): ')
    if not validate_ip_cidr(ip):
        print(f'Invalid IP address or CIDR block \'{ip}\', please try again.', file=sys.stderr)
        sys.exit(1)
    return ip
def wazuh_enabled() -> bool:
    """Return True if any pillar file under LOCAL_SALT_DIR/pillar enables
    Wazuh ('wazuh: 1').

    BUG FIX: the original passed the bare filename from os.listdir() to
    open(), which resolves against the CWD and raises FileNotFoundError;
    it also ignored subdirectories. Walk the tree and open files by full
    path instead.
    """
    pillar_dir = os.path.join(LOCAL_SALT_DIR, 'pillar')
    for dirpath, _dirnames, filenames in os.walk(pillar_dir):
        for name in filenames:
            with open(os.path.join(dirpath, name), 'r') as pillar:
                if 'wazuh: 1' in pillar.read():
                    return True
    return False
def root_to_str(root: ET.ElementTree) -> str:
    """Serialize *root* to a pretty-printed XML string.

    Flattens lxml's output onto one line and strips inter-tag whitespace so
    minidom can re-indent uniformly without emitting stray text nodes.
    """
    xml_str = ET.tostring(root, encoding='unicode', method='xml').replace('\n', '')
    # Remove any run of spaces that immediately follows a closing '>'.
    xml_str = re.sub(r'(?:(?<=>) *)', '', xml_str)
    # Remove specific substrings to better format comments on initial parse/write
    # NOTE(review): the exact whitespace in these two patterns may have been
    # collapsed in transit — confirm against the upstream source.
    xml_str = re.sub(r' -', '', xml_str)
    xml_str = re.sub(r' -->', ' -->', xml_str)
    dom = minidom.parseString(xml_str)
    return dom.toprettyxml(indent=" ")
def rem_wl(ip):
    """Remove every <global> element whose <white_list> equals *ip* from the
    Wazuh ossec.conf, along with the audit comment that precedes it, then
    rewrite the file. No-op if no matching entry exists."""
    # remove_blank_text lets root_to_str() re-indent cleanly afterwards.
    parser = ET.XMLParser(remove_blank_text=True)
    with open(WAZUH_CONF, 'rb') as wazuh_conf:
        tree = ET.parse(wazuh_conf, parser)
        root = tree.getroot()
    # XPath: parents (<global>) of <white_list> nodes whose text equals ip.
    global_elems = root.findall(f"global/white_list[. = '{ip}']/..")
    if len(global_elems) > 0:
        for g_elem in global_elems:
            ge_index = list(root).index(g_elem)
            # If the element immediately before the match is a comment,
            # assume it is the audit comment so-allow wrote and drop it too.
            if ge_index > 0 and root[list(root).index(g_elem) - 1].tag == ET.Comment:
                root.remove(root[ge_index - 1])
            root.remove(g_elem)
        # Only rewrite the config when something was actually removed.
        with open(WAZUH_CONF, 'w') as out:
            out.write(root_to_str(root))
def apply(role: str, ip: str) -> int:
    """Deny *ip* for *role*: remove it via so-firewall, apply the firewall
    Salt state, and (for the analyst role with Wazuh enabled) drop its
    whitelist entry from ossec.conf and restart the OSSEC server.

    Returns a shell-style exit code (0 on success).
    """
    firewall_cmd = ['so-firewall', 'excludehost', role, ip]
    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'firewall', 'queue=True']
    restart_wazuh_cmd = ['so-wazuh-restart']

    print(f'Removing {ip} from the {role} role. This can take a few seconds...')
    cmd = subprocess.run(firewall_cmd)
    if cmd.returncode != 0:
        return cmd.returncode
    cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
    if cmd.returncode != 0:
        # BUG FIX: typo 'Commmand' corrected.
        print(f'Command \'{" ".join(salt_cmd)}\' failed.', file=sys.stderr)
        return cmd.returncode
    # BUG FIX: the original tested `wazuh_enabled` (the function object,
    # always truthy) instead of calling it, so the whitelist step ran for
    # analyst even when Wazuh was disabled.
    if not (wazuh_enabled() and role == 'analyst'):
        return cmd.returncode
    try:
        rem_wl(ip)
        print(f'Removed whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
    except Exception as e:
        print(f'Failed to remove whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
        print(e)
        return 1
    print('Restarting OSSEC Server...')
    cmd = subprocess.run(restart_wazuh_cmd)
    if cmd.returncode != 0:
        print('Failed to restart OSSEC server.')
    return cmd.returncode
def main():
    """Entry point: require root, parse role/IP options (or fall back to
    interactive prompts when no role flag is given), then apply each role."""
    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)
    main_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''\
            additional information:
              To use this script in interactive mode call it with no arguments
            '''))
    group = main_parser.add_argument_group(title='roles')
    group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
    group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
    group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
    group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
    group.add_argument('-o', dest='roles', action='append_const', const=VALID_ROLES['o']['role'], help="Osquery endpoint - 8090/tcp")
    group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")
    group.add_argument('-w', dest='roles', action='append_const', const=VALID_ROLES['w']['role'], help="Wazuh agent - 1514/tcp/udp")
    group.add_argument('-p', dest='roles', action='append_const', const=VALID_ROLES['p']['role'], help="Wazuh API - 55000/tcp")
    group.add_argument('-r', dest='roles', action='append_const', const=VALID_ROLES['r']['role'], help="Wazuh registration service - 1515/tcp")
    # NOTE(review): group title 'allow' was copied from so-allow — confirm
    # whether it should read 'deny' here.
    ip_g = main_parser.add_argument_group(title='allow')
    ip_g.add_argument('-i', help="IP or CIDR block to disallow connections from, requires at least one role argument", metavar='', dest='ip')
    args = main_parser.parse_args(sys.argv[1:])

    if args.roles is None:
        # Interactive mode: prompt for both role and IP.
        role = role_prompt()
        ip = ip_prompt()
        try:
            return_code = apply(role, ip)
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            # BUG FIX: arbitrary exceptions need not define .errno; the
            # original raised AttributeError here (or exited with None).
            return_code = getattr(e, 'errno', 1) or 1
        sys.exit(return_code)

    if args.ip is None:
        # Allow the IP to come from the environment for legacy automation.
        if os.environ.get('IP') is None:
            main_parser.print_help()
            sys.exit(1)
        args.ip = os.environ['IP']

    if validate_ip_cidr(args.ip):
        try:
            for role in args.roles:
                return_code = apply(role, args.ip)
                if return_code > 0:
                    break
        except Exception as e:
            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
            # BUG FIX: see above — do not assume .errno exists.
            return_code = getattr(e, 'errno', 1) or 1
    else:
        print(f'Invalid IP address or CIDR block \'{args.ip}\', please try again.', file=sys.stderr)
        return_code = 1
    sys.exit(return_code)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit non-zero on Ctrl-C without printing a traceback.
        sys.exit(1)

View File

@@ -70,7 +70,7 @@ do
done
docker_exec(){
CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/config/elastalert_config.yaml $OPTIONS"
CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/elastalert/config.yaml $OPTIONS"
if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
$CMD > "$FILE_SAVE_LOCATION"
else

0
salt/common/tools/sbin/so-elasticsearch-roles-load Normal file → Executable file
View File

View File

@@ -54,7 +54,7 @@ PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_
if [[ "$PIPELINES" -lt 5 ]]; then
echo "Setting up ingest pipeline(s)"
for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft misp mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system tomcat traefik zeek zscaler
for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system threatintel tomcat traefik zeek zscaler
do
echo "Loading $MODULE"
docker exec -i so-filebeat filebeat setup modules -pipelines -modules $MODULE -c $FB_MODULE_YML

8
salt/common/tools/sbin/so-import-evtx Normal file → Executable file
View File

@@ -25,6 +25,7 @@
INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)
LOG_FILE=/nsm/import/evtx-import.log
. /usr/sbin/so-common
@@ -41,14 +42,17 @@ function evtx2es() {
EVTX=$1
HASH=$2
ES_PW=$(lookup_pillar "auth:users:so_elastic_user:pass" "elasticsearch")
ES_USER=$(lookup_pillar "auth:users:so_elastic_user:user" "elasticsearch")
docker run --rm \
-v "$EVTX:/tmp/$RUNID.evtx" \
--entrypoint evtx2es \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
--host {{ MANAGERIP }} --scheme https \
--index so-beats-$INDEX_DATE --pipeline import.wel \
--login {{ES_USER}} --pwd {{ES_PW}} \
"/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null
--login $ES_USER --pwd $ES_PW \
"/tmp/$RUNID.evtx" >> $LOG_FILE 2>&1
docker run --rm \
-v "$EVTX:/tmp/import.evtx" \

View File

@@ -8,9 +8,9 @@ fi
echo "This tool will update a manager's IP address to the new IP assigned to the management network interface."
echo
echo ""
echo "WARNING: This tool is still undergoing testing, use at your own risk!"
echo
echo ""
if [ -z "$OLD_IP" ]; then
OLD_IP=$(lookup_pillar "managerip")
@@ -27,7 +27,7 @@ if [ -z "$NEW_IP" ]; then
NEW_IP=$(ip -4 addr list $iface | grep inet | cut -d' ' -f6 | cut -d/ -f1)
if [ -z "$NEW_IP" ]; then
fail "Unable to detect new IP on interface $iface. "
fail "Unable to detect new IP on interface $iface."
fi
echo "Detected new IP $NEW_IP on interface $iface."
@@ -39,15 +39,20 @@ fi
echo "About to change old IP $OLD_IP to new IP $NEW_IP."
echo
echo ""
read -n 1 -p "Would you like to continue? (y/N) " CONTINUE
echo
echo ""
if [ "$CONTINUE" == "y" ]; then
for file in $(grep -rlI $OLD_IP /opt/so/saltstack /etc); do
echo "Updating file: $file"
sed -i "s|$OLD_IP|$NEW_IP|g" $file
done
for file in $(grep -rlI $OLD_IP /opt/so/saltstack /etc); do
echo "Updating file: $file"
sed -i "s|$OLD_IP|$NEW_IP|g" $file
done
echo "Granting MySQL root user permissions on $NEW_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
echo "Removing MySQL root user from $OLD_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
echo "The IP has been changed from $OLD_IP to $NEW_IP."
@@ -60,4 +65,4 @@ if [ "$CONTINUE" == "y" ]; then
fi
else
echo "Exiting without changes."
fi
fi

0
salt/common/tools/sbin/so-playbook-import Normal file → Executable file
View File

View File

@@ -17,4 +17,4 @@
. /usr/sbin/so-common
docker exec -it so-redis redis-cli llen logstash:unparsed
docker exec so-redis redis-cli llen logstash:unparsed

View File

@@ -182,6 +182,10 @@ function ensureRoleFileExists() {
echo "Database file does not exist yet, installation is likely not yet complete."
fi
if [[ -d "$socRolesFile" ]]; then
echo "Removing invalid roles directory created by Docker"
rm -fr "$socRolesFile"
fi
mv "${rolesTmpFile}" "${socRolesFile}"
fi
}
@@ -445,7 +449,7 @@ function deleteUser() {
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
grep -v "$id" "$socRolesFile" > "$rolesTmpFile"
grep -v "$identityId" "$socRolesFile" > "$rolesTmpFile"
mv "$rolesTmpFile" "$socRolesFile"
}

View File

@@ -45,14 +45,15 @@ so-domainstatsimage:
so-domainstats:
docker_container.running:
- require:
- so-domainstatsimage
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}
- hostname: domainstats
- name: so-domainstats
- user: domainstats
- binds:
- /opt/so/log/domainstats:/var/log/domain_stats
- require:
- file: dstatslogdir
- cmd: so-domainstatsimage
append_so-domainstats_so-status.conf:
file.append:
@@ -65,4 +66,4 @@ append_so-domainstats_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -122,6 +122,10 @@ so-elastalert:
- {{MANAGER_URL}}:{{MANAGER_IP}}
- require:
- cmd: wait_for_elasticsearch
- file: elastarules
- file: elastalogdir
- file: elastacustmodulesdir
- file: elastaconf
- watch:
- file: elastaconf

View File

@@ -1,8 +1,8 @@
{% set so_elastic_user_pass = salt['random.get_str'](20) %}
{% set so_kibana_user_pass = salt['random.get_str'](20) %}
{% set so_logstash_user_pass = salt['random.get_str'](20) %}
{% set so_beats_user_pass = salt['random.get_str'](20) %}
{% set so_monitor_user_pass = salt['random.get_str'](20) %}
{% set so_elastic_user_pass = salt['random.get_str'](72) %}
{% set so_kibana_user_pass = salt['random.get_str'](72) %}
{% set so_logstash_user_pass = salt['random.get_str'](72) %}
{% set so_beats_user_pass = salt['random.get_str'](72) %}
{% set so_monitor_user_pass = salt['random.get_str'](72) %}
elastic_auth_pillar:
file.managed:

View File

@@ -2,7 +2,7 @@
"description" : "beats.common",
"processors" : [
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational' && ctx.containsKey('winlog')", "name":"win.eventlogs" } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -0,0 +1,155 @@
{
"description" : "ECS Testing Pipeline",
"processors": [
{
"append": {
"field": "event.category",
"value": [
"process"
],
"if": "ctx?.wazuh?.data?.type == 'process'",
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "event.type",
"value": [
"start"
],
"if": "ctx?.wazuh?.data?.type == 'process'",
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "event.type",
"value": "end",
"if": "ctx?.wazuh?.data?.type == 'process_end'",
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "user.name",
"copy_from": "process.user",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "host.os.type",
"copy_from": "wazuh.data.os.sysname",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "host.os.platform",
"copy_from": "wazuh.data.os.platform",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "host.os.name",
"copy_from": "wazuh.data.os.name",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "host.os.version",
"copy_from": "wazuh.data.os.version",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "signal.rule.name",
"copy_from": "rule.name",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "signal.rule.type",
"copy_from": "rule.category",
"ignore_empty_value": true,
"ignore_failure": true
}
},
{
"set": {
"field": "signal.rule.threat.tactic.name",
"copy_from": "rule.mitre.tactic",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true
}
},
{
"append": {
"field": "event.category",
"value": [
"authentication"
],
"if": "if(ctx?.rule?.groups != null) {\n if(ctx?.rule?.groups?.contains('authentication_success')) {\n return true\n }\n if(ctx?.rule?.groups?.contains('authentication_failed')) {\n return true\n }\n return false\n}",
"ignore_failure": true
}
},
{
"set": {
"field": "event.outcome",
"value": "success",
"ignore_empty_value": true,
"if": "ctx?.rule?.groups != null && ctx?.rule?.groups.contains('authentication_success')",
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "event.outcome",
"value": "failure",
"ignore_empty_value": true,
"if": "ctx?.rule?.groups != null && ctx?.rule?.groups.contains('authentication_failed')",
"tag": "test",
"ignore_failure": true
}
},
{
"set": {
"field": "url.path",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true,
"copy_from": "url.original"
}
},
{
"set": {
"field": "url.domain",
"ignore_empty_value": true,
"tag": "test",
"ignore_failure": true,
"copy_from": "kibana.log.meta.req.headers.origin"
}
}
]
}

View File

@@ -15,7 +15,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
include:
- ssl
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
@@ -280,6 +281,24 @@ so-elasticsearch:
- file: esyml
- file: esingestconf
- file: so-elasticsearch-pipelines-file
- require:
- file: esyml
- file: eslog4jfile
- file: nsmesdir
- file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms
{% if ismanager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
{% if salt['pillar.get']('elasticsearch:auth:enabled', False) %}
- cmd: auth_users_roles_inode
- cmd: auth_users_inode
{% endif %}
append_so-elasticsearch_so-status.conf:
file.append:

View File

@@ -1,12 +1,14 @@
{%- set INDEX_SORTING = salt['pillar.get']('elasticsearch:index_sorting', True) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-common:refresh', '30s') %}
{
"index_patterns": ["so-*"],
"version":50001,
"order":10,
"settings":{
"number_of_replicas":0,
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":1,
"index.refresh_interval":"30s",
"index.refresh_interval":"{{ REFRESH }}",
"index.routing.allocation.require.box_type":"hot",
"index.mapping.total_fields.limit": "1500",
{%- if INDEX_SORTING is sameas true %}

File diff suppressed because it is too large Load Diff

View File

@@ -25,9 +25,10 @@
{% from 'filebeat/map.jinja' import SO with context %}
{% set ES_INCLUDED_NODES = ['so-eval', 'so-standalone', 'so-managersearch', 'so-node', 'so-heavynode', 'so-import'] %}
include:
- ssl
#only include elastic state for certain nodes
{% if grains.role in ES_INCLUDED_NODES %}
include:
- elasticsearch
{% endif %}
@@ -66,7 +67,7 @@ fileregistrydir:
- makedirs: True
# This needs to be owned by root
filebeatconfsync:
filebeatconf:
file.managed:
- name: /opt/so/conf/filebeat/etc/filebeat.yml
- source: salt://filebeat/etc/filebeat.yml
@@ -78,7 +79,7 @@ filebeatconfsync:
OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
# Filebeat module config file
filebeatmoduleconfsync:
filebeatmoduleconf:
file.managed:
- name: /opt/so/conf/filebeat/etc/module-setup.yml
- source: salt://filebeat/etc/module-setup.yml
@@ -135,14 +136,21 @@ so-filebeat:
{% endfor %}
{% endfor %}
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml
- file: filebeatconf
- require:
- file: filebeatconf
- file: filebeatmoduleconf
- file: filebeatmoduledir
- x509: conf_filebeat_crt
- x509: conf_filebeat_key
- x509: trusttheca
{% if grains.role in ES_INCLUDED_NODES %}
run_module_setup:
cmd.run:
- name: /usr/sbin/so-filebeat-module-setup
- require:
- file: filebeatmoduleconfsync
- file: filebeatmoduleconf
- docker_container: so-filebeat
- onchanges:
- docker_container: so-elasticsearch

View File

@@ -244,6 +244,23 @@ third_party_filebeat:
var.input: udp
var.syslog_host: 0.0.0.0
var.syslog_port: 9501
threatintel:
abuseurl:
enabled: false
abusemalware:
enabled: false
misp:
enabled: false
malwarebazaar:
enabled: false
otx:
enabled: false
anomali:
enabled: false
anomalithreatstream:
enabled: false
recordedfuture:
enabled: false
zscaler:
zia:
enabled: false

View File

@@ -162,6 +162,9 @@ role:
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
endgame:
portgroups:
- {{ portgroups.endgame }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
@@ -248,6 +251,9 @@ role:
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
endgame:
portgroups:
- {{ portgroups.endgame }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
@@ -337,6 +343,9 @@ role:
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
endgame:
portgroups:
- {{ portgroups.endgame }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
@@ -594,4 +603,4 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_manager }}
- {{ portgroups.salt_manager }}

View File

@@ -39,6 +39,9 @@ firewall:
elasticsearch_rest:
tcp:
- 9200
endgame:
tcp:
- 3765
fleet_api:
tcp:
- 8090

View File

@@ -17,6 +17,7 @@
include:
- ssl
- mysql
# Fleet Setup
@@ -136,10 +137,13 @@ so-fleet:
- /opt/so/conf/fleet/packs:/packs
- watch:
- /opt/so/conf/fleet/etc
- require:
- x509: fleet_key
- x509: fleet_crt
append_so-fleet_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-fleet
{% endif %}
{% endif %}

View File

@@ -132,6 +132,8 @@ so-grafana:
- 0.0.0.0:3000:3000
- watch:
- file: /opt/so/conf/grafana/*
- require:
- file: grafana-config
append_so-grafana_so-status.conf:
file.append:

View File

@@ -17,6 +17,8 @@
include:
- salt.minion
- salt.python3-influxdb
- ssl
# Influx DB
influxconfdir:
file.directory:
@@ -60,6 +62,10 @@ so-influxdb:
- 0.0.0.0:8086:8086
- watch:
- file: influxdbconf
- require:
- file: influxdbconf
- x509: influxdb_key
- x509: influxdb_crt
append_so-influxdb_so-status.conf:
file.append:

View File

@@ -51,6 +51,8 @@ so-logscan:
- /opt/so/log/logscan:/logscan/output:rw
- /opt/so/log:/logscan/logs:ro
- cpu_period: {{ logscan_cpu_period }}
- require:
- file: logscan_conf
{% else %}
- force: true
{% endif %}

View File

@@ -15,36 +15,37 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('global:managerip') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('global:managerip') %}
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% elif grains['role'] == 'so-helix' %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% endif %}
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% elif grains['role'] == 'so-helix' %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% endif %}
{% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %}
{% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %}
{% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% if grains.role in ['so-heavynode'] %}
{% set EXTRAHOSTHOSTNAME = salt['grains.get']('host') %}
{% set EXTRAHOSTIP = salt['pillar.get']('sensor:mainip') %}
{% else %}
{% set EXTRAHOSTHOSTNAME = MANAGER %}
{% set EXTRAHOSTIP = MANAGERIP %}
{% endif %}
{% if grains.role in ['so-heavynode'] %}
{% set EXTRAHOSTHOSTNAME = salt['grains.get']('host') %}
{% set EXTRAHOSTIP = salt['pillar.get']('sensor:mainip') %}
{% else %}
{% set EXTRAHOSTHOSTNAME = MANAGER %}
{% set EXTRAHOSTIP = MANAGERIP %}
{% endif %}
include:
- ssl
- elasticsearch
# Create the logstash group
@@ -73,22 +74,22 @@ lspipelinedir:
- user: 931
- group: 939
{% for PL in PIPELINES %}
{% for CONFIGFILE in PIPELINES[PL].config %}
{% for PL in PIPELINES %}
{% for CONFIGFILE in PIPELINES[PL].config %}
ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:
file.managed:
- source: salt://logstash/pipelines/config/{{CONFIGFILE}}
{% if 'jinja' in CONFIGFILE.split('.')[-1] %}
{% if 'jinja' in CONFIGFILE.split('.')[-1] %}
- name: /opt/so/conf/logstash/pipelines/{{PL}}/{{CONFIGFILE.split('/')[1] | replace(".jinja", "")}}
- template: jinja
{% else %}
{% else %}
- name: /opt/so/conf/logstash/pipelines/{{PL}}/{{CONFIGFILE.split('/')[1]}}
{% endif %}
{% endif %}
- user: 931
- group: 939
- mode: 660
- makedirs: True
{% endfor %}
{% endfor %}
ls_pipeline_{{PL}}:
file.directory:
@@ -96,12 +97,12 @@ ls_pipeline_{{PL}}:
- user: 931
- group: 939
- require:
{% for CONFIGFILE in PIPELINES[PL].config %}
{% for CONFIGFILE in PIPELINES[PL].config %}
- file: ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
{% endfor %}
{% endfor %}
- clean: True
{% endfor %}
{% endfor %}
lspipelinesyml:
file.managed:
@@ -157,50 +158,60 @@ so-logstash:
- environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
- port_bindings:
{% for BINDING in DOCKER_OPTIONS.port_bindings %}
{% for BINDING in DOCKER_OPTIONS.port_bindings %}
- {{ BINDING }}
{% endfor %}
{% endfor %}
- binds:
- /opt/so/conf/elasticsearch/templates/:/templates/:ro
- /opt/so/conf/logstash/etc/log4j2.properties:/usr/share/logstash/config/log4j2.properties:ro
- /opt/so/conf/logstash/etc/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
- /opt/so/conf/logstash/etc/pipelines.yml:/usr/share/logstash/config/pipelines.yml
- /opt/so/conf/logstash/etc/:/usr/share/logstash/config/:ro
- /opt/so/conf/logstash/pipelines:/usr/share/logstash/pipelines:ro
- /opt/so/rules:/etc/nsm/rules:ro
- /nsm/import:/nsm/import:ro
- /nsm/logstash:/usr/share/logstash/data:rw
- /opt/so/log/logstash:/var/log/logstash:rw
- /sys/fs/cgroup:/sys/fs/cgroup:ro
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if grains['role'] == 'so-heavynode' %}
{% if grains['role'] == 'so-heavynode' %}
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
{% else %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% endif %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
- /etc/pki/ca.cer:/ca/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /nsm/suricata:/suricata:ro
- /nsm/wazuh/logs/alerts:/wazuh/alerts:ro
- /nsm/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro
- /opt/so/log/strelka:/strelka:ro
{%- endif %}
{%- endif %}
- watch:
- file: lsetcsync
{% for PL in PIPELINES %}
{% for PL in PIPELINES %}
- file: ls_pipeline_{{PL}}
{% for CONFIGFILE in PIPELINES[PL].config %}
{% for CONFIGFILE in PIPELINES[PL].config %}
- file: ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
{% endfor %}
{% endfor %}
{% endfor %}
{% for TEMPLATE in TEMPLATES %}
{% for TEMPLATE in TEMPLATES %}
- file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}
{% endfor %}
{% endfor %}
- require:
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
- x509: etc_filebeat_crt
{% endif %}
{% if grains['role'] == 'so-heavynode' %}
- x509: trusttheca
{% else %}
- x509: pki_public_ca_crt
{% endif %}
- file: cacertz
- file: capemz
append_so-logstash_so-status.conf:
file.append:

View File

@@ -0,0 +1,14 @@
input {
http {
id => "endgame_data"
port => 3765
codec => es_bulk
request_headers_target_field => client_headers
remote_host_target_field => client_host
ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
ssl_verify_mode => "peer"
}
}

View File

@@ -0,0 +1,29 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
filter {
if [event][module] =~ "endgame" {
mutate {
remove_field => ["client_headers", "client_host"]
}
}
}
output {
if [event][module] =~ "endgame" {
elasticsearch {
id => "endgame_es_output"
hosts => "{{ ES }}"
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
{% endif %}
index => "endgame-%{+YYYY.MM.dd}"
ssl => true
ssl_certificate_verification => false
}
}
}

View File

@@ -77,7 +77,7 @@ FreshIndexMaxAge: 300
# AllowUserPorts: 80
RedirMax: 6
# VfileUseRangeOps is set for fedora volatile files on mirrors that dont to range
VfileUseRangeOps: 0
VfileUseRangeOps: -1
# PassThroughPattern: private-ppa\.launchpad\.net:443$
# PassThroughPattern: .* # this would allow CONNECT to everything
PassThroughPattern: (repo\.securityonion\.net:443|download\.docker\.com:443|mirrors\.fedoraproject\.org:443|packages\.wazuh\.com:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/

View File

@@ -60,8 +60,7 @@ aptcacherlogdir:
- group: 939
- makedirs: true
# Copy the config
acngcopyconf:
acngconf:
file.managed:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- source: salt://manager/files/acng/acng.conf
@@ -80,6 +79,8 @@ so-aptcacherng:
- /opt/so/conf/aptcacher-ng/cache:/var/cache/apt-cacher-ng:rw
- /opt/so/log/aptcacher-ng:/var/log/apt-cacher-ng:rw
- /opt/so/conf/aptcacher-ng/etc/acng.conf:/etc/apt-cacher-ng/acng.conf:ro
- require:
- file: acngconf
append_so-aptcacherng_so-status.conf:
file.append:

View File

@@ -21,6 +21,9 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
include:
- ssl
# Minio Setup
minioconfdir:
file.directory:
@@ -59,6 +62,9 @@ so-minio:
- /etc/pki/minio.key:/.minio/certs/private.key:ro
- /etc/pki/minio.crt:/.minio/certs/public.crt:ro
- entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data"
- require:
- file: minio_key
- file: minio_crt
append_so-minio_so-status.conf:
file.append:
@@ -71,4 +77,4 @@ append_so-minio_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -1,2 +1 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{{ MYSQLPASS }}

View File

@@ -45,13 +45,22 @@ mysqlpiddir:
- group: 939
- makedirs: True
mysqletcsync:
file.recurse:
- name: /opt/so/conf/mysql/etc
- source: salt://mysql/etc
mysqlcnf:
file.managed:
- name: /opt/so/conf/mysql/etc/my.cnf
- source: salt://mysql/etc/my.cnf
- user: 939
- group: 939
mysqlpass:
file.managed:
- name: /opt/so/conf/mysql/etc/mypass
- source: salt://mysql/etc/mypass
- user: 939
- group: 939
- template: jinja
- defaults:
MYSQLPASS: {{ MYSQLPASS }}
mysqllogdir:
file.directory:
@@ -94,6 +103,9 @@ so-mysql:
- /opt/so/log/mysql:/var/log/mysql:rw
- watch:
- /opt/so/conf/mysql/etc
- require:
- file: mysqlcnf
- file: mysqlpass
cmd.run:
- name: until nc -z {{ MAINIP }} 3306; do sleep 1; done
- timeout: 600
@@ -118,4 +130,4 @@ append_so-mysql_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -8,6 +8,9 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap') %}
include:
- ssl
# Drop the correct nginx config based on role
nginxconfdir:
file.directory:
@@ -73,28 +76,38 @@ so-nginx:
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- /opt/so/conf/fleet/packages:/opt/socore/html/packages
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
- /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
- /opt/so/conf/fleet/packages:/opt/socore/html/packages
{% if ISAIRGAP is sameas true %}
- /nsm/repo:/opt/socore/html/repo:ro
{% endif %}
# ATT&CK Navigator binds
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
- /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro
{% endif %}
{% if ISAIRGAP is sameas true %}
- /nsm/repo:/opt/socore/html/repo:ro
{% endif %}
- cap_add: NET_BIND_SERVICE
- port_bindings:
- 80:80
- 443:443
{% if ISAIRGAP is sameas true %}
{% if ISAIRGAP is sameas true %}
- 7788:7788
{% endif %}
{%- if FLEETMANAGER or FLEETNODE %}
{% endif %}
{%- if FLEETMANAGER or FLEETNODE %}
- 8090:8090
{%- endif %}
{%- endif %}
- watch:
- file: nginxconf
- file: nginxconfdir
- require:
- file: nginxconf
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
- x509: managerssl_key
- x509: managerssl_crt
- file: navigatorconfig
- file: navigatordefaultlayer
{% endif %}
append_so-nginx_so-status.conf:
file.append:
@@ -107,4 +120,4 @@ append_so-nginx_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -1,20 +1,23 @@
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- set diskfreepercentage = salt['pillar.get']('steno:diskfreepercentage', 10) %}
{%- set maxfiles = salt['pillar.get']('steno:maxfiles', 30000) %}
{%- set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- set DISKFREEPERCENTAGE = salt['pillar.get']('steno:diskfreepercentage', 10) %}
{%- set MAXFILES = salt['pillar.get']('steno:maxfiles', 30000) %}
{%- set BLOCKS = salt['pillar.get']('steno:blocks', 2048) %}
{%- set FILEMB = salt['pillar.get']('steno:filemb', 4096) %}
{%- set AIOPS = salt['pillar.get']('steno:aiops', 128) %}
{%- set THREADS = salt['pillar.get']('steno:threads', 1) %}
{
"Threads": [
{ "PacketsDirectory": "/nsm/pcap"
, "IndexDirectory": "/nsm/pcapindex"
, "MaxDirectoryFiles": {{ maxfiles }}
, "DiskFreePercentage": {{ diskfreepercentage }}
}
{ "PacketsDirectory": "/nsm/pcap", "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ MAXFILES }}, "DiskFreePercentage": {{ DISKFREEPERCENTAGE }} }
{%- if THREADS > 1 %}
{%- for i in range(2,THREADS+1) %}
, { "PacketsDirectory": "/nsm/pcap" , "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ MAXFILES }}, "DiskFreePercentage": {{ DISKFREEPERCENTAGE }} }
{%- endfor %}
{%- endif %}
]
, "StenotypePath": "/usr/bin/stenotype"
, "Interface": "{{ interface }}"
, "Interface": "{{ INTERFACE }}"
, "Port": 1234
, "Host": "127.0.0.1"
, "Flags": ["-v", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}]
, "Flags": ["-v", "--blocks={{ BLOCKS }}", "--preallocate_file_mb={{ FILEMB }}", "--aiops={{ AIOPS }}", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}]
, "CertPath": "/etc/stenographer/certs"
}

View File

@@ -117,8 +117,6 @@ so-steno:
- start: {{ STENOOPTIONS.start }}
- network_mode: host
- privileged: True
- port_bindings:
- 127.0.0.1:1234:1234
- binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
- /opt/so/conf/steno/config:/etc/stenographer/config:rw
@@ -127,7 +125,9 @@ so-steno:
- /nsm/pcaptmp:/tmp:rw
- /opt/so/log/stenographer:/var/log/stenographer:rw
- watch:
- file: /opt/so/conf/steno/config
- file: stenoconf
- require:
- file: stenoconf
{% else %} {# if stenographer isn't enabled, then stop and remove the container #}
- force: True
{% endif %}

View File

@@ -19,6 +19,9 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
include:
- ssl
# Redis Setup
redisconfdir:
file.directory:
@@ -41,10 +44,10 @@ redislogdir:
- group: 939
- makedirs: True
redisconfsync:
file.recurse:
- name: /opt/so/conf/redis/etc
- source: salt://redis/etc
redisconf:
file.managed:
- name: /opt/so/conf/redis/etc/redis.conf
- source: salt://redis/etc/redis.conf
- user: 939
- group: 939
- template: jinja
@@ -67,6 +70,11 @@ so-redis:
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: /opt/so/conf/redis/etc
- require:
- file: redisconf
- x509: redis_crt
- x509: redis_key
- x509: pki_public_ca_crt
append_so-redis_so-status.conf:
file.append:
@@ -79,4 +87,4 @@ append_so-redis_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -1,6 +1,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
include:
- ssl
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:
@@ -47,6 +50,10 @@ so-dockerregistry:
- retry:
attempts: 5
interval: 30
- require:
- file: dockerregistryconf
- x509: registry_crt
- x509: registry_key
append_so-dockerregistry_so-status.conf:
file.append:

View File

@@ -87,4 +87,4 @@ salt_minion_service:
patch_pkg:
pkg.installed:
- name: patch
- name: patch

View File

@@ -17,7 +17,7 @@
"agent": {
"nodeId": "{{ grains.host | lower }}",
"role": "{{ grains.role }}",
"description": "{{ DESCRIPTION }}",
"description": {{ DESCRIPTION | tojson }},
"address": "{{ ADDRESS }}",
"model": "{{ MODEL }}",
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},

View File

@@ -38,8 +38,10 @@ so-sensoroni:
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
- watch:
- file: /opt/so/conf/sensoroni/sensoroni.json
- require:
- file: sensoroniagentconf
append_so-sensoroni_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-sensoroni
- text: so-sensoroni

View File

@@ -1,3 +1,4 @@
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
[
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
"links": [
@@ -29,5 +30,12 @@
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "target": "_blank",
"links": [
"https://www.virustotal.com/gui/search/{value}"
]}
]}
{%- if HIGHLANDER %}
{%- set EGHOST = salt['pillar.get']('soc:endgamehost', 'EGHOSTNOTPOPULATED') %}
,{ "name": "Endgame", "description": "Endgame Endpoint Investigation and Response", "icon": "fa-external-link-alt", "target": "_blank",
"links": [
"https://{{ EGHOST }}/endpoints/{:agent.id}"
]}
{% endif %}
]

View File

@@ -26,7 +26,7 @@
{%- set ES_USER = '' %}
{%- set ES_PASS = '' %}
{%- endif %}
{%- set ES_INDEX_PATTERNS = salt['pillar.get']('soc:es_index_patterns', '*:so-*') %}
{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
"server": {
@@ -57,6 +57,7 @@
{%- endif %}
"username": "{{ ES_USER }}",
"password": "{{ ES_PASS }}",
"index": "{{ ES_INDEX_PATTERNS }}",
"cacheMs": {{ ES_FIELDCAPS_CACHE }},
"verifyCert": false,
"timeoutMs": {{ API_TIMEOUT }}

View File

@@ -26,6 +26,15 @@ soclogdir:
- group: 939
- makedirs: True
socactions:
file.managed:
- name: /opt/so/conf/soc/menu.actions.json
- source: salt://soc/files/soc/menu.actions.json
- user: 939
- group: 939
- mode: 600
- template: jinja
socconfig:
file.managed:
- name: /opt/so/conf/soc/soc.json
@@ -71,6 +80,10 @@ soccustomroles:
- mode: 600
- template: jinja
socusersroles:
file.exists:
- name: /opt/so/conf/soc/soc_users_roles
# we dont want this added too early in setup, so we add the onlyif to verify 'startup_states: highstate'
# is in the minion config. That line is added before the final highstate during setup
sosyncusers:
@@ -86,13 +99,13 @@ so-soc:
- name: so-soc
- binds:
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
- /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
- /opt/so/conf/soc/custom.js:/opt/sensoroni/html/js/custom.js:ro
- /opt/so/conf/soc/custom_roles:/opt/sensoroni/rbac/custom_roles:ro
- /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
{%- if salt['pillar.get']('nodestab', {}) %}
- extra_hosts:
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
@@ -103,6 +116,15 @@ so-soc:
- 0.0.0.0:9822:9822
- watch:
- file: /opt/so/conf/soc/*
- require:
- file: socdatadir
- file: soclogdir
- file: socconfig
- file: socmotd
- file: socbanner
- file: soccustom
- file: soccustomroles
- file: socusersroles
append_so-soc_so-status.conf:
file.append:
@@ -145,6 +167,14 @@ kratossync:
- file_mode: 600
- template: jinja
kratos_schema:
file.exists:
- name: /opt/so/conf/kratos/schema.json
kratos_yaml:
file.exists:
- name: /opt/so/conf/kratos/kratos.yaml
so-kratos:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kratos:{{ VERSION }}
@@ -160,6 +190,11 @@ so-kratos:
- 0.0.0.0:4434:4434
- watch:
- file: /opt/so/conf/kratos
- require:
- file: kratos_schema
- file: kratos_yaml
- file: kratoslogdir
- file: kratosdir
append_so-kratos_so-status.conf:
file.append:

View File

@@ -8,6 +8,9 @@
{% set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
include:
- nginx
soctopusdir:
file.directory:
- name: /opt/so/conf/soctopus/sigma-import
@@ -71,6 +74,9 @@ so-soctopus:
- 0.0.0.0:7000:7000
- extra_hosts:
- {{MANAGER_URL}}:{{MANAGER_IP}}
- require:
- file: soctopusconf
- file: navigatordefaultlayer
append_so-soctopus_so-status.conf:
file.append:
@@ -83,4 +89,4 @@ append_so-soctopus_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -30,6 +30,9 @@
{% set ca_server = global_ca_server[0] %}
{% endif %}
include:
- ca
# Trust the CA
trusttheca:
x509.pem_managed:
@@ -64,8 +67,9 @@ removeesp12dir:
- name: /etc/pki/elasticsearch.p12
- onlyif: "[ -d /etc/pki/elasticsearch.p12 ]"
/etc/pki/influxdb.key:
influxdb_key:
x509.private_key_managed:
- name: /etc/pki/influxdb.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -82,8 +86,9 @@ removeesp12dir:
interval: 30
# Create a cert for the talking to influxdb
/etc/pki/influxdb.crt:
influxdb_crt:
x509.certificate_managed:
- name: /etc/pki/influxdb.crt
- ca_server: {{ ca_server }}
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
@@ -112,8 +117,9 @@ influxkeyperms:
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet'] %}
# Create a cert for Redis encryption
/etc/pki/redis.key:
redis_key:
x509.private_key_managed:
- name: /etc/pki/redis.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
@@ -129,8 +135,9 @@ influxkeyperms:
attempts: 5
interval: 30
/etc/pki/redis.crt:
redis_crt:
x509.certificate_managed:
- name: /etc/pki/redis.crt
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/redis.key
@@ -158,8 +165,9 @@ rediskeyperms:
{% endif %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
/etc/pki/filebeat.key:
etc_filebeat_key:
x509.private_key_managed:
- name: /etc/pki/filebeat.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
@@ -168,7 +176,7 @@ rediskeyperms:
- new: True
{% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
- prereq:
- x509: /etc/pki/filebeat.crt
- x509: etc_filebeat_crt
{%- endif %}
- timeout: 30
- retry:
@@ -176,8 +184,9 @@ rediskeyperms:
interval: 30
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
etc_filebeat_crt:
x509.certificate_managed:
- name: /etc/pki/filebeat.crt
- ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /etc/pki/filebeat.key
@@ -198,7 +207,7 @@ rediskeyperms:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
- x509: /etc/pki/filebeat.key
- x509: etc_filebeat_key
fbperms:
@@ -237,8 +246,9 @@ fbcrtlink:
- user: socore
- group: socore
/etc/pki/registry.key:
registry_key:
x509.private_key_managed:
- name: /etc/pki/registry.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -255,9 +265,11 @@ fbcrtlink:
interval: 30
# Create a cert for the docker registry
/etc/pki/registry.crt:
registry_crt:
x509.certificate_managed:
- name: /etc/pki/registry.crt
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ manager }}, IP:{{ managerip }}
- signing_policy: registry
- public_key: /etc/pki/registry.key
- CN: {{ manager }}
@@ -280,8 +292,9 @@ regkeyperms:
- mode: 640
- group: 939
/etc/pki/minio.key:
minio_key:
x509.private_key_managed:
- name: /etc/pki/minio.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -298,8 +311,9 @@ regkeyperms:
interval: 30
# Create a cert for minio
/etc/pki/minio.crt:
minio_crt:
x509.certificate_managed:
- name: /etc/pki/minio.crt
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/minio.key
@@ -379,8 +393,9 @@ elasticp12perms:
- mode: 640
- group: 930
/etc/pki/managerssl.key:
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -397,8 +412,9 @@ elasticp12perms:
interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- public_key: /etc/pki/managerssl.key
@@ -424,8 +440,9 @@ msslkeyperms:
- group: 939
# Create a private key and cert for OSQuery
/etc/pki/fleet.key:
fleet_key:
x509.private_key_managed:
- name: /etc/pki/fleet.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -441,11 +458,12 @@ msslkeyperms:
attempts: 5
interval: 30
/etc/pki/fleet.crt:
fleet_crt:
x509.certificate_managed:
- name: /etc/pki/fleet.crt
- signing_private_key: /etc/pki/fleet.key
- CN: {{ manager }}
- subjectAltName: DNS:{{ manager }},IP:{{ managerip }}
- subjectAltName: DNS:{{ manager }},IP:{{ managerip }}{% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }}{% endif %}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -473,8 +491,9 @@ fbcertdir:
- name: /opt/so/conf/filebeat/etc/pki
- makedirs: True
/opt/so/conf/filebeat/etc/pki/filebeat.key:
conf_filebeat_key:
x509.private_key_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
@@ -483,7 +502,7 @@ fbcertdir:
- new: True
{% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
- prereq:
- x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- x509: conf_filebeat_crt
{%- endif %}
- timeout: 30
- retry:
@@ -491,8 +510,9 @@ fbcertdir:
interval: 30
# Request a cert and drop it where it needs to go to be distributed
/opt/so/conf/filebeat/etc/pki/filebeat.crt:
conf_filebeat_crt:
x509.certificate_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
@@ -516,7 +536,7 @@ filebeatpkcs:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:"
- onchanges:
- x509: /opt/so/conf/filebeat/etc/pki/filebeat.key
- x509: conf_filebeat_key
filebeatkeyperms:
file.managed:
@@ -537,8 +557,9 @@ chownfilebeatp8:
{% if grains['role'] == 'so-fleet' %}
/etc/pki/managerssl.key:
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -555,8 +576,9 @@ chownfilebeatp8:
interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
managerssl_crt:
x509.certificate_managed:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- public_key: /etc/pki/managerssl.key
@@ -582,8 +604,9 @@ msslkeyperms:
- group: 939
# Create a private key and cert for Fleet
/etc/pki/fleet.key:
fleet_key:
x509.private_key_managed:
- name: /etc/pki/fleet.key
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
@@ -599,8 +622,9 @@ msslkeyperms:
attempts: 5
interval: 30
/etc/pki/fleet.crt:
fleet_crt:
x509.certificate_managed:
- name: /etc/pki/fleet.crt
- signing_private_key: /etc/pki/fleet.key
- CN: {{ HOSTNAME }}
- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}

View File

@@ -93,7 +93,7 @@ surilogscript:
- month: '*'
- dayweek: '*'
suriconfigsync:
suriconfig:
file.managed:
- name: /opt/so/conf/suricata/suricata.yaml
- source: salt://suricata/files/suricata.yaml.jinja
@@ -155,10 +155,14 @@ so-suricata:
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:
- file: /opt/so/conf/suricata/suricata.yaml
- file: suriconfig
- file: surithresholding
- file: /opt/so/conf/suricata/rules/
- file: /opt/so/conf/suricata/bpf
- require:
- file: suriconfig
- file: surithresholding
- file: suribpf
{% else %} {# if Suricata isn't enabled, then stop and remove the container #}
- force: True

View File

@@ -16,10 +16,13 @@
{%- set MANAGER = salt['grains.get']('master') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{%- set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
{%- set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
{%- set ZEEK_ENABLED = salt['pillar.get']('zeek:enabled', True) %}
{%- set MDENGINE = salt['pillar.get']('global:mdengine', 'ZEEK') %}
# Global tags can be specified here in key="value" format.
[global_tags]
@@ -621,23 +624,25 @@
# # Read stats from one or more Elasticsearch servers or clusters
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ MANAGER }}:9200"]
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
cluster_stats = true
{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
username = "{{ ES_USER }}"
password = "{{ ES_PASS }}"
{% endif %}
{%- endif %}
insecure_skip_verify = true
{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
{%- elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ NODEIP }}:9200"]
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
cluster_stats = true
{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
username = "{{ ES_USER }}"
password = "{{ ES_PASS }}"
{% endif %}
{%- endif %}
insecure_skip_verify = true
{% endif %}
{%- endif %}
#
# ## Timeout for HTTP requests to the elastic search server(s)
@@ -673,9 +678,22 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
{% if grains.role in ['so-node','so-standalone','so-manager', 'so-managersearch', 'so-heavynode'] -%}
[[inputs.logstash]]
url = "http://localhost:9600"
collect = ["pipelines"]
{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
username = "{{ salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user') }}"
password = "{{ salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass') }}"
{% endif %}
{%- endif %}
{% if grains.role in ['so-eval','so-standalone','so-manager', 'so-managersearch', 'so-heavynode'] -%}
[[inputs.redis]]
servers = ["tcp://localhost:6379"]
{%- endif %}
# # Read metrics from one or more commands that can output to stdout
[[inputs.exec]]
commands = [
"/scripts/sostatus.sh"
@@ -725,10 +743,10 @@
"/scripts/stenoloss.sh",
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %}
{%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %}
"/scripts/zeekloss.sh",
"/scripts/zeekcaptureloss.sh",
{% endif %}
{%- endif %}
"/scripts/oldpcap.sh",
"/scripts/raid.sh",
"/scripts/beatseps.sh"
@@ -742,10 +760,10 @@
"/scripts/stenoloss.sh",
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %}
{%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %}
"/scripts/zeekloss.sh",
"/scripts/zeekcaptureloss.sh",
{% endif %}
{%- endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
"/scripts/raid.sh",
@@ -761,10 +779,10 @@
"/scripts/stenoloss.sh",
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %}
{%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %}
"/scripts/zeekloss.sh",
"/scripts/zeekcaptureloss.sh",
{% endif %}
{%- endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
"/scripts/raid.sh",
@@ -779,10 +797,10 @@
"/scripts/stenoloss.sh",
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %}
{%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %}
"/scripts/zeekloss.sh",
"/scripts/zeekcaptureloss.sh",
{% endif %}
{%- endif %}
"/scripts/oldpcap.sh",
"/scripts/influxdbsize.sh",
"/scripts/raid.sh",
@@ -796,10 +814,10 @@
"/scripts/stenoloss.sh",
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %}
{%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %}
"/scripts/zeekloss.sh",
"/scripts/zeekcaptureloss.sh",
{% endif %}
{%- endif %}
"/scripts/oldpcap.sh",
"/scripts/helixeps.sh"
]

View File

@@ -5,6 +5,9 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
include:
- ssl
# Add Telegraf to monitor all the things.
tgraflogdir:
file.directory:
@@ -88,7 +91,16 @@ so-telegraf:
- file: tgrafconf
- file: tgrafsyncscripts
- file: node_config
- require:
- file: tgrafconf
- file: node_config
{% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' or grains['role'] == 'so-managersearch' %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- x509: influxdb_crt
- x509: influxdb_key
append_so-telegraf_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf

View File

@@ -19,25 +19,30 @@ THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
if [ ! "$THEGREP" ]; then
TSFILE=/var/log/telegraf/laststenodrop.log
if [ -f "$TSFILE" ]; then
LASTTS=$(cat $TSFILE)
else
LASTTS=0
CHECKIT=$(grep "Thread 0" /var/log/stenographer/stenographer.log |tac |head -2|wc -l)
STENOGREP=$(grep "Thread 0" /var/log/stenographer/stenographer.log |tac |head -2)
declare RESULT=($STENOGREP)
CURRENT_PACKETS=$(echo ${RESULT[9]} | awk -F'=' '{print $2 }')
CURRENT_DROPS=$(echo ${RESULT[12]} | awk -F'=' '{print $2 }')
PREVIOUS_PACKETS=$(echo ${RESULT[23]} | awk -F'=' '{print $2 }')
PREVIOUS_DROPS=$(echo ${RESULT[26]} | awk -F'=' '{print $2 }')
DROPPED=$((CURRENT_DROPS - PREVIOUS_DROPS))
TOTAL_CURRENT=$((CURRENT_PACKETS + CURRENT_DROPS))
TOTAL_PAST=$((PREVIOUS_PACKETS + PREVIOUS_DROPS))
TOTAL=$((TOTAL_CURRENT - TOTAL_PAST))
if [ $CHECKIT == 2 ]; then
if [ $DROPPED == 0 ]; then
echo "stenodrop drop=$DROPPED"
else
LOSS=$(echo "4 k $DROPPED $TOTAL / 100 * p" | dc)
echo "stenodrop drop=$LOSS"
fi
fi
# Get the data
LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
if [[ "$CURRENTTS" != "$LASTTS" ]]; then
DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
echo $CURRENTTS > $TSFILE
else
DROP=0
fi
echo "stenodrop drop=$DROP"
else
exit 0
fi

View File

@@ -73,6 +73,14 @@ thehiveesdata:
- user: 939
- group: 939
thehive_elasticsearch_yml:
file.exists:
- name: /opt/so/conf/thehive/etc/es/elasticsearch.yml
log4j2_properties:
file.exists:
- name: /opt/so/conf/thehive/etc/es/log4j2.properties
so-thehive-es:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}
@@ -91,12 +99,23 @@ so-thehive-es:
- port_bindings:
- 0.0.0.0:9400:9400
- 0.0.0.0:9500:9500
- require:
- file: thehive_elasticsearch_yml
- file: log4j2_properties
append_so-thehive-es_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-thehive-es
cortex_application_conf:
file.exists:
- name: /opt/so/conf/thehive/etc/cortex-application.conf
application_conf:
file.exists:
- name: /opt/so/conf/thehive/etc/application.conf
# Install Cortex
so-cortex:
docker_container.running:
@@ -110,6 +129,8 @@ so-cortex:
- /opt/so/conf/cortex/custom-responders:/custom-responders:ro
- port_bindings:
- 0.0.0.0:9001:9001
- require:
- file: cortex_application_conf
append_so-cortex_so-status.conf:
file.append:
@@ -135,6 +156,8 @@ so-thehive:
- /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
- require:
- file: application_conf
append_so-thehive_so-status.conf:
file.append:

View File

@@ -29,7 +29,7 @@ cortex_init(){
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"
if wait_for_web_response $CORTEX_URL "Cortex"; then
if wait_for_web_response $CORTEX_URL "Cortex" 120; then
# Migrate DB
curl -sk -XPOST -L "$CORTEX_API_URL/maintenance/migrate"
@@ -65,7 +65,7 @@ if [ -f /opt/so/state/cortex.txt ]; then
cortex_clean
exit 0
else
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then
cortex_init
cortex_clean
else

View File

@@ -20,7 +20,7 @@ thehive_init(){
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
if wait_for_web_response $THEHIVE_URL "TheHive"; then
if wait_for_web_response $THEHIVE_URL "TheHive" 120; then
# Migrate DB
curl -sk -XPOST -L "$THEHIVE_API_URL/maintenance/migrate"
@@ -43,7 +43,7 @@ if [ -f /opt/so/state/thehive.txt ]; then
thehive_clean
exit 0
else
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then
thehive_init
thehive_clean
else

View File

@@ -90,11 +90,14 @@ zeekpolicysync:
# Ensure the zeek spool tree (and state.db) ownership is correct
zeekspoolownership:
file.directory:
- name: /nsm/zeek
- name: /nsm/zeek/spool
- user: 937
- max_depth: 1
- recurse:
- user
zeekstatedbownership:
file.managed:
- name: /nsm/zeek/spool/state.db
- user: 937
- replace: False
- create: False
# Sync Intel
zeekintelloadsync:
@@ -116,7 +119,7 @@ zeekctlcfg:
ZEEKCTL: {{ ZEEK.zeekctl | tojson }}
# Sync node.cfg
nodecfgsync:
nodecfg:
file.managed:
- name: /opt/so/conf/zeek/node.cfg
- source: salt://zeek/files/node.cfg
@@ -146,7 +149,7 @@ plcronscript:
- mode: 755
zeekpacketlosscron:
cron.present:
cron.{{ZEEKOPTIONS.pl_cron_state}}:
- name: /usr/local/bin/packetloss.sh
- user: root
- minute: '*/10'
@@ -182,7 +185,7 @@ zeekbpf:
{% endif %}
localzeeksync:
localzeek:
file.managed:
- name: /opt/so/conf/zeek/local.zeek
- source: salt://zeek/files/local.zeek.jinja
@@ -219,6 +222,11 @@ so-zeek:
- file: /opt/so/conf/zeek/zeekctl.cfg
- file: /opt/so/conf/zeek/policy
- file: /opt/so/conf/zeek/bpf
- require:
- file: localzeek
- file: nodecfg
- file: zeekctlcfg
- file: zeekbpf
{% else %} {# if Zeek isn't enabled, then stop and remove the container #}
- force: True
{% endif %}
@@ -247,4 +255,4 @@ delete_so-zeek_so-status.disabled:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -1,15 +1,17 @@
{% set ZEEKOPTIONS = {} %}
{% set ENABLED = salt['pillar.get']('zeek:enabled', 'True') %}
{% set ENABLED = salt['pillar.get']('zeek:enabled', True) %}
# don't start the docker container if it is an import node or disabled via pillar
{% if grains.id.split('_')|last == 'import' or ENABLED is sameas false %}
{% if grains.id.split('_')|last == 'import' or not ENABLED %}
{% do ZEEKOPTIONS.update({'start': False}) %}
{% do ZEEKOPTIONS.update({'pl_cron_state': 'absent'}) %}
{% else %}
{% do ZEEKOPTIONS.update({'start': True}) %}
{% do ZEEKOPTIONS.update({'pl_cron_state': 'present'}) %}
{% endif %}
{% if ENABLED is sameas false %}
{% if not ENABLED %}
{% do ZEEKOPTIONS.update({'status': 'absent'}) %}
{% else %}
{% do ZEEKOPTIONS.update({'status': 'running'}) %}
{% endif %}
{% endif %}

View File

@@ -35,7 +35,6 @@ ADMINPASS2=onionuser
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
INTERWEBS=AIRGAP
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -35,7 +35,6 @@ ZEEKVERSION=ZEEK
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
INTERWEBS=AIRGAP
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -475,10 +475,15 @@ collect_mngr_hostname() {
whiptail_management_server "$MSRV"
done
while [[ $MSRV == "$HOSTNAME" ]]; do
whiptail_invalid_hostname 0
whiptail_management_server "$MSRV"
done
if ! getent hosts "$MSRV"; then
whiptail_manager_ip
while ! valid_ip4 "$MSRVIP"; do
while ! valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do
whiptail_invalid_input
whiptail_manager_ip "$MSRVIP"
done
@@ -846,7 +851,7 @@ check_requirements() {
local req_cores
local req_storage
local nic_list
readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g')"
readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')"
local num_nics=${#nic_list[@]}
if [[ "$standalone_or_dist" == 'standalone' ]]; then
@@ -1124,9 +1129,10 @@ detect_os() {
installer_progress_loop() {
local i=0
local msg="${1:-Performing background actions...}"
while true; do
[[ $i -lt 98 ]] && ((i++))
set_progress_str "$i" 'Checking that all required packages are installed and enabled...' nolog
set_progress_str "$i" "$msg" nolog
[[ $i -gt 0 ]] && sleep 5s
done
}
@@ -1209,11 +1215,7 @@ docker_install() {
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
;;
esac
if [ $OSVER != "xenial" ]; then
retry 50 10 "apt-get -y install docker-ce python3-docker" >> "$setup_log" 2>&1 || exit 1
else
retry 50 10 "apt-get -y install docker-ce python-docker" >> "$setup_log" 2>&1 || exit 1
fi
retry 50 10 "apt-get -y install docker-ce python3-docker" >> "$setup_log" 2>&1 || exit 1
fi
docker_registry
{
@@ -1229,7 +1231,7 @@ docker_registry() {
mkdir -p /etc/docker >> "$setup_log" 2>&1
# This will get applied so docker can attempt to start
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
DOCKERNET=172.17.0.0
fi
# Make the host use the manager docker registry
DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
@@ -1378,7 +1380,7 @@ filter_unused_nics() {
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')
readarray -t filtered_nics <<< "$filtered_nics"
nic_list=()
@@ -1421,7 +1423,7 @@ firewall_generate_templates() {
cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1
for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do
for i in analyst beats_endpoint endgame sensor manager minion osquery_endpoint search_node wazuh_endpoint; do
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1
done
@@ -1513,7 +1515,7 @@ host_pillar() {
" mainint: '$MNIC'"\
"sensoroni:"\
" node_address: '$MAINIP'"\
" node_description: '$NODE_DESCRIPTION'"\
" node_description: '${NODE_DESCRIPTION//\'/''}'"\
"" > "$pillar_file"
}
@@ -1539,6 +1541,10 @@ install_cleanup() {
info "Removing so-setup permission entry from sudoers file"
sed -i '/so-setup/d' /etc/sudoers
fi
if [[ -z $SO_ERROR ]]; then
echo "Setup completed at $(date)" >> "$setup_log" 2>&1
fi
}
import_registry_docker() {
@@ -1640,6 +1646,14 @@ manager_pillar() {
printf '%s\n'\
" kratoskey: '$KRATOSKEY'"\
"" >> "$pillar_file"
if [[ -n $ENDGAMEHOST ]]; then
printf '%s\n'\
"soc:"\
" endgamehost: '$ENDGAMEHOST'"\
" es_index_patterns: '*:so-*,*:endgame-*'"\
"" >> "$pillar_file"
fi
}
manager_global() {
@@ -2039,7 +2053,7 @@ reinstall_init() {
if command -v docker &> /dev/null; then
# Stop and remove all so-* containers so files can be changed with more safety
if [ $(docker ps -a -q --filter "name=so-" | wc -l) -gt 0 ]; then
if [[ $(docker ps -a -q --filter "name=so-" | wc -l) -gt 0 ]]; then
docker stop $(docker ps -a -q --filter "name=so-")
docker rm -f $(docker ps -a -q --filter "name=so-")
fi
@@ -2058,6 +2072,10 @@ reinstall_init() {
# Remove the old launcher package in case the config changes
remove_package launcher-final
if [[ $OS == 'ubuntu' ]]; then
apt-mark unhold $(apt-mark showhold)
fi
} >> "$setup_log" 2>&1
}
@@ -2152,6 +2170,7 @@ saltify() {
python36-m2crypto\
python36-mysql\
python36-packaging\
python36-lxml\
yum-utils\
device-mapper-persistent-data\
lvm2\
@@ -2166,10 +2185,10 @@ saltify() {
DEBIAN_FRONTEND=noninteractive retry 50 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || exit 1
if [ $OSVER == "bionic" ]; then
# Switch to Python 3 as default if this is not xenial
# Switch to Python 3 as default for bionic
update-alternatives --install /usr/bin/python python /usr/bin/python3.6 10 >> "$setup_log" 2>&1
elif [ $OSVER == "focal" ]; then
# Switch to Python 3 as default if this is not xenial
# Switch to Python 3 as default for focal
update-alternatives --install /usr/bin/python python /usr/bin/python3.8 10 >> "$setup_log" 2>&1
fi
@@ -2188,21 +2207,16 @@ saltify() {
# Grab the version from the os-release file
local ubuntu_version
ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
if [ "$OSVER" != "xenial" ]; then local py_ver_url_path="/py3"; else local py_ver_url_path="/apt"; fi
case "$install_type" in
'FLEET')
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-mysqldb" >> "$setup_log" 2>&1 || exit 1
else
retry 50 10 "apt-get -y install python-mysqldb" >> "$setup_log" 2>&1 || exit 1
fi
retry 50 10 "apt-get -y install python3-mysqldb" >> "$setup_log" 2>&1 || exit 1
;;
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
# Add saltstack repo(s)
wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
wget -q --inet4-only -O - https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3003 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2210,7 +2224,7 @@ saltify() {
# Get gpg keys
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -2234,7 +2248,7 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3003/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac
@@ -2243,11 +2257,7 @@ saltify() {
set_progress_str 8 'Installing salt-minion & python modules'
retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb" >> "$setup_log" 2>&1 || exit 1
else
retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb python-packaging python-influxdb" >> "$setup_log" 2>&1 || exit 1
fi
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1
fi
}

95
setup/so-preflight Normal file → Executable file
View File

@@ -18,30 +18,61 @@
source ../salt/common/tools/sbin/so-common
source ./so-functions
preflight_log='/root/preflight.log'
script_run="$1"
if [[ $script_run == true ]]; then
preflight_log="${2:-'/root/preflight.log'}"
else
preflight_log='/root/preflight.log'
fi
check_default_repos() {
local ret_code=0
printf ' Checking OS default repos with ' | tee -a "$preflight_log"
if [[ $OS == 'centos' ]]; then
printf '%s' 'yum update.' | tee -a "$preflight_log"
echo "" >> "$preflight_log"
yum -y update >> $preflight_log 2>&1
ret_code=$?
local repo_str=' Checking OS default repos with '
if [[ $script_run == true ]]; then
printf '%s' "$repo_str"
else
printf '%s' 'apt update.' | tee -a "$preflight_log"
printf '%s' "$repo_str" | tee -a "$preflight_log"
fi
if [[ $OS == 'centos' ]]; then
if [[ $script_run == true ]]; then
printf '%s' 'yum update.'
else
printf '%s' 'yum update.' | tee -a "$preflight_log"
fi
echo "" >> "$preflight_log"
yum -y check-update >> $preflight_log 2>&1
ret_code=$?
if [[ $ret_code == 0 || $ret_code == 100 ]]; then
printf '%s\n' ' SUCCESS'
ret_code=0
else
printf '%s\n' ' FAILURE'
fi
else
if [[ $script_run == true ]]; then
printf '%s' 'apt update.'
else
printf '%s' 'apt update.' | tee -a "$preflight_log"
fi
echo "" >> "$preflight_log"
retry 50 10 "apt-get -y update" >> $preflight_log 2>&1
ret_code=$?
[[ $ret_code == 0 ]] && printf '%s\n' ' SUCCESS' || printf '%s\n' ' FAILURE'
fi
[[ $ret_code == 0 ]] && printf '%s\n' ' SUCCESS' || printf '%s\n' ' FAILURE'
return $ret_code
}
check_new_repos() {
printf ' Checking repo URLs added by setup.' | tee -a "$preflight_log"
local repo_url_str=' Checking repo URLs added by setup.'
if [[ $script_run == true ]]; then
printf '%s' "$repo_url_str"
else
printf '%s' "$repo_url_str" | tee -a "$preflight_log"
fi
if [[ $OS == 'centos' ]]; then
local repo_arr=(
@@ -54,11 +85,10 @@ check_new_repos() {
else
local ubuntu_version
ubuntu_version=$(grep VERSION_ID /etc/os-release 2> /dev/null | awk -F '[ "]' '{print $2}')
if [ "$OSVER" != "xenial" ]; then local py_ver_url_path="/py3"; else local py_ver_url_path="/apt"; fi
local repo_arr=(
"https://download.docker.com/linux/ubuntu/gpg"
"https://download.docker.com/linux/ubuntu"
"https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://packages.wazuh.com/key/GPG-KEY-WAZUH"
"https://packages.wazuh.com"
)
@@ -71,9 +101,15 @@ check_new_repos() {
}
check_misc_urls() {
printf ' Checking various other URLs used by setup.' | tee -a "$preflight_log"
local misc_url_str=' Checking various other URLs used by setup.'
if [[ $script_run == true ]]; then
printf '%s' "$misc_url_str"
else
printf '%s' "$misc_url_str" | tee -a "$preflight_log"
fi
local so_version=$(cat ../VERSION)
local so_version
so_version=$(cat ../VERSION)
local url_arr=(
"https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
"https://github.com/Neo23x0/signature-base"
@@ -113,10 +149,18 @@ __check_url_arr() {
}
main() {
local intro_str="Beginning pre-flight checks."
local success_str="Pre-flight checks completed successfully!"
local fail_str="Pre-flight checks could not complete."
detect_os "$preflight_log"
[[ -f $preflight_log ]] || touch "$preflight_log"
echo "Beginning pre-flight checks." | tee "$preflight_log"
if [[ $script_run == true ]]; then
echo "$intro_str"
else
echo "$intro_str" | tee "$preflight_log"
fi
check_default_repos &&\
check_new_repos &&\
check_misc_urls
@@ -125,12 +169,23 @@ main() {
echo ""
if [[ $success == 0 ]]; then
echo -e "Pre-flight checks completed successfully!\n" | tee -a "$preflight_log"
if [[ $script_run == true ]]; then
echo "$success_str"
else
echo "$success_str" | tee -a "$preflight_log"
echo ""
fi
else
echo -e "Pre-flight checks could not complete." | tee -a "$preflight_log"
echo -e " Check $preflight_log for details.\n"
exit 1
if [[ $script_run == true ]]; then
echo "$fail_str"
else
echo "$fail_str" | tee -a "$preflight_log"
echo "Check $preflight_log for details."
echo ""
fi
fi
exit $success
}
main

View File

@@ -256,7 +256,8 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then
elif [ "$install_type" = 'IMPORT' ]; then
is_import=true
elif [ "$install_type" = 'ANALYST' ]; then
is_analyst=true
cd .. || exit 255
exec bash so-analyst-install
fi
if [[ $is_manager || $is_import ]]; then
@@ -264,14 +265,6 @@ if [[ $is_manager || $is_import ]]; then
fi
if ! [[ -f $install_opt_file ]]; then
# Check if this is an airgap install
if [[ ( $is_manager || $is_import || $is_minion ) && $is_iso ]]; then
whiptail_airgap
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true
fi
fi
if [[ $is_manager && $is_sensor ]]; then
check_requirements "standalone"
elif [[ $is_fleet_standalone ]]; then
@@ -311,17 +304,29 @@ if ! [[ -f $install_opt_file ]]; then
add_mngr_ip_to_hosts
fi
if [[ $is_minion ]]; then
whiptail_ssh_key_copy_notice
copy_ssh_key >> $setup_log 2>&1
fi
# Check if this is an airgap install
if [[ ( $is_manager || $is_import) && $is_iso ]]; then
whiptail_airgap
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true
fi
elif [[ $is_minion && $is_iso ]]; then
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1
airgap_check=$?
[[ $airgap_check ]] && is_airgap=true >> $setup_log 2>&1
fi
reset_proxy
if [[ -z $is_airgap ]]; then
collect_net_method
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
fi
if [[ $is_minion ]]; then
whiptail_ssh_key_copy_notice
copy_ssh_key >> $setup_log 2>&1
fi
if [[ $is_minion ]] && ! (compare_versions); then
info "Installer version mismatch, downloading correct version from manager"
printf '%s\n' \
@@ -336,19 +341,31 @@ if ! [[ -f $install_opt_file ]]; then
download_repo_tarball
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
if [[ $is_analyst ]]; then
cd .. || exit 255
exec bash so-analyst-install
fi
else
rm -rf $install_opt_file >> "$setup_log" 2>&1
fi
if [[ -z $is_airgap ]]; then
percentage=0
{
installer_progress_loop 'Running preflight checks...' &
progress_bg_proc=$!
./so-preflight true "$setup_log" >> $setup_log 2>&1
preflight_ret=$?
echo "$preflight_ret" > /tmp/preflight_ret
kill -9 "$progress_bg_proc"
wait "$progress_bg_proc" &> /dev/null
} | progress '...'
[[ -f /tmp/preflight_ret ]] && preflight_ret=$(cat /tmp/preflight_ret)
rm /tmp/preflight_ret
if [[ -n $preflight_ret && $preflight_ret -gt 0 ]] && ! ( whiptail_preflight_err ); then
whiptail_cancel
fi
fi
percentage=0
{
installer_progress_loop & # Run progress bar to 98 in ~8 minutes while waiting for package installs
installer_progress_loop 'Checking that all required packages are installed and enabled...' & # Run progress bar to 98 in ~8 minutes while waiting for package installs
progress_bg_proc=$!
installer_prereq_packages
install_success=$?
@@ -941,7 +958,6 @@ if [[ -n $SO_ERROR ]]; then
SKIP_REBOOT=1
whiptail_setup_failed
else
echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1
{
@@ -969,11 +985,16 @@ else
so-learn enable logscan --apply >> $setup_log 2>&1
fi
if [[ -n $ENDGAMEHOST ]]; then
set_progress_str 99 'Configuring firewall for Endgame SMP'
so-firewall --apply includehost endgame $ENDGAMEHOST >> $setup_log 2>&1
fi
} | whiptail_gauge_post_setup "Running post-installation steps..."
echo "Post-installation steps have completed. Awaiting user input to clean up installer." >> $setup_log 2>&1
whiptail_setup_complete
[[ $setup_type != 'iso' ]] && whitpail_ssh_warning
echo "Post-installation steps have completed." >> $setup_log 2>&1
fi
install_cleanup >> "$setup_log" 2>&1

View File

@@ -83,8 +83,8 @@ whiptail_bond_nics_mtu() {
}
whiptail_cancel() {
whiptail --title "$whiptail_title" --msgbox "Cancelling Setup." 8 75
[ -z "$TESTING" ] && whiptail --title "$whiptail_title" --msgbox "Cancelling Setup." 8 75
if [ -d "/root/installtmp" ]; then
{
echo "/root/installtmp exists";
@@ -95,7 +95,7 @@ whiptail_cancel() {
title "User cancelled setup."
exit
exit 1
}
whiptail_check_exitstatus() {
@@ -285,7 +285,7 @@ whiptail_storage_requirements() {
You need ${needed_val} to meet minimum requirements.
Visit https://docs.securityonion.net/en/2.1/hardware.html for more information.
Visit https://docs.securityonion.net/en/latest/hardware.html for more information.
Select YES to continue anyway, or select NO to cancel.
EOM
@@ -505,6 +505,8 @@ whiptail_end_settings() {
[[ -n $WEBUSER ]] && __append_end_msg "Web User: $WEBUSER"
[[ -n $FLEETNODEUSER ]] && __append_end_msg "Fleet User: $FLEETNODEUSER"
[[ -n $FLEETCUSTOMHOSTNAME ]] && __append_end_msg "Fleet Custom Hostname: $FLEETCUSTOMHOSTNAME"
if [[ $is_manager ]]; then
__append_end_msg "Enabled Optional Components:"
@@ -733,7 +735,7 @@ whiptail_install_type() {
# What kind of install are we doing?
install_type=$(whiptail --title "$whiptail_title" --radiolist \
"Choose install type:" 12 65 5 \
"Choose install type. See https://docs.securityonion.net/architecture for details." 12 65 5 \
"EVAL" "Evaluation mode (not for production) " ON \
"STANDALONE" "Standalone production install " OFF \
"DISTRIBUTED" "Distributed install submenu " OFF \
@@ -747,6 +749,11 @@ whiptail_install_type() {
if [[ $install_type == "DISTRIBUTED" ]]; then
whiptail_install_type_dist
if [[ $dist_option == "NEWDEPLOYMENT" ]]; then
whiptail_install_type_dist_new
else
whiptail_install_type_dist_existing
fi
elif [[ $install_type == "OTHER" ]]; then
whiptail_install_type_other
fi
@@ -757,13 +764,55 @@ whiptail_install_type() {
whiptail_install_type_dist() {
[ -n "$TESTING" ] && return
dist_option=$(whiptail --title "$whiptail_title" --menu "Do you want to start a new deployment or join this box to \nan existing deployment?" 11 75 2 \
"New Deployment " "Create a new Security Onion deployment" \
"Existing Deployment " "Join to an exisiting Security Onion deployment " \
3>&1 1>&2 2>&3
)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
dist_option=$(echo "${dist_option^^}" | tr -d ' ')
}
whiptail_install_type_dist_new() {
[ -n "$TESTING" ] && return
local mngr_msg
read -r -d '' mngr_msg <<- EOM
Choose a distributed manager type to start a new grid.
install_type=$(whiptail --title "$whiptail_title" --radiolist \
"Choose distributed node type:" 13 60 6 \
"MANAGER" "Start a new grid " ON \
"SENSOR" "Create a forward only sensor " OFF \
See https://docs.securityonion.net/architecture for details.
Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
EOM
install_type=$(whiptail --title "$whiptail_title" --radiolist "$mngr_msg" 15 75 2 \
"MANAGER" "New grid, requires separate search node(s) " ON \
"MANAGERSEARCH" "New grid, separate search node(s) are optional " OFF \
3>&1 1>&2 2>&3
)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_install_type_dist_existing() {
[ -n "$TESTING" ] && return
local node_msg
read -r -d '' node_msg <<- EOM
Choose a distributed node type to join to an existing grid.
See https://docs.securityonion.net/architecture for details.
Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users.
EOM
install_type=$(whiptail --title "$whiptail_title" --radiolist "$node_msg" 17 57 4 \
"SENSOR" "Create a forward only sensor " ON \
"SEARCHNODE" "Add a search node with parsing " OFF \
"MANAGERSEARCH" "Manager + search node " OFF \
"FLEET" "Dedicated Fleet Osquery Node " OFF \
"HEAVYNODE" "Sensor + Search Node " OFF \
3>&1 1>&2 2>&3
@@ -775,8 +824,6 @@ whiptail_install_type_dist() {
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
export install_type
}
whiptail_install_type_other() {
@@ -810,7 +857,6 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe
[ -n "$TESTING" ] && return
whiptail --title "$whiptail_title" --msgbox " Invalid input, please try again." 7 40
}
whiptail_invalid_proxy() {
@@ -857,10 +903,21 @@ whiptail_invalid_user_warning() {
whiptail_invalid_hostname() {
[ -n "$TESTING" ] && return
local is_manager_hostname
is_manager_hostname="$1"
local error_message
error_message=$(echo "Please choose a valid hostname. It cannot be localhost; and must contain only \
the ASCII letters 'A-Z' and 'a-z' (case-sensitive), the digits '0' through '9', \
and hyphen ('-')" | tr -d '\t')
read -r -d '' error_message <<- EOM
Please choose a valid hostname. It cannot be localhost. It must contain only the ASCII letters 'A-Z' and 'a-z' (case-sensitive), the digits '0' through '9', and hyphen ('-').
EOM
if [[ $is_manager_hostname = 0 ]]; then
local error_message
read -r -d '' error_message <<- EOM
Please enter a valid hostname. The manager hostname cannot be localhost or the chosen hostname for this machine.
EOM
fi
whiptail --title "$whiptail_title" \
--msgbox "$error_message" 10 75
@@ -905,6 +962,7 @@ whiptail_first_menu_iso() {
option=$(echo "${option^^}" | tr -d ' ')
}
whiptail_make_changes() {
[ -n "$TESTING" ] && return
@@ -1487,6 +1545,20 @@ whiptail_patch_schedule_select_hours() {
}
whiptail_preflight_err() {
[ -n "$TESTING" ] && return 1
read -r -d '' message <<- EOM
The so-preflight script failed checking one or more URLs required by setup. Check $setup_log for more details.
Would you like to exit setup?
EOM
whiptail --title "$whiptail_title" \
--yesno "$message" 11 75 \
--yes-button "Continue" --no-button "Exit" --defaultno
}
whiptail_proxy_ask() {
[ -n "$TESTING" ] && return
@@ -1774,7 +1846,7 @@ whiptail_storage_requirements() {
You need ${needed_val} to meet minimum requirements.
Visit https://docs.securityonion.net/en/2.1/hardware.html for more information.
Visit https://docs.securityonion.net/en/latest/hardware.html for more information.
Press YES to continue anyway, or press NO to cancel.
EOM

View File

@@ -1,6 +1,6 @@
#!/bin/bash
. ../salt/common/tools/sbin/so-common
. "$(dirname "$0")"/../salt/common/tools/sbin/so-common
script_ret=0
@@ -106,7 +106,7 @@ test_fun 1 valid_dns_list "192.168.9."
sleep 0.15s
header "int (default min: 1, default max: 1000)"
header "int (default min: 1, default max: 1000000000)"
test_fun 0 valid_int "24"
@@ -114,9 +114,9 @@ test_fun 0 valid_int "1"
test_fun 0 valid_int "2" "2"
test_fun 0 valid_int "1000"
test_fun 0 valid_int "1000000000"
test_fun 1 valid_int "10001"
test_fun 1 valid_int "1000000001"
test_fun 1 valid_int "24" "" "20"