diff --git a/salt/bpf/defaults.yaml b/salt/bpf/defaults.yaml new file mode 100644 index 000000000..329537494 --- /dev/null +++ b/salt/bpf/defaults.yaml @@ -0,0 +1,4 @@ +bpf: + pcap: [] + suricata: [] + zeek: [] \ No newline at end of file diff --git a/salt/bpf/soc_bpf.yaml b/salt/bpf/soc_bpf.yaml new file mode 100644 index 000000000..62395830f --- /dev/null +++ b/salt/bpf/soc_bpf.yaml @@ -0,0 +1,7 @@ +bpf: + pcap: + description: List of BPF filters to apply to PCAP. + suricata: + description: List of BPF filters to apply to Suricata. + zeek: + description: List of BPF filters to apply to Zeek. diff --git a/salt/common/init.sls b/salt/common/init.sls index c391c127e..4bf779e2b 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -123,6 +123,13 @@ utilsyncscripts: - so-firewall - so-image-common - soup + - so-status + +so-status_script: + file.managed: + - name: /usr/sbin/so-status + - source: salt://common/tools/sbin/so-status + - mode: 755 {% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %} # Add sensor cleanup @@ -192,9 +199,16 @@ sostatus_log: file.managed: - name: /opt/so/log/sostatus/status.log - mode: 644 - + +common_pip_dependencies: + pip.installed: + - user: root + - pkgs: + - rich + - target: /usr/lib64/python3.6/site-packages + # Install sostatus check cron -'/usr/sbin/so-status -q; echo $? > /opt/so/log/sostatus/status.log 2>&1': +'/usr/sbin/so-status -j > /opt/so/log/sostatus/status.log 2>&1': cron.present: - user: root - minute: '*/1' diff --git a/salt/common/tools/sbin/so-minion b/salt/common/tools/sbin/so-minion index 3bfd626ad..3a5fcec71 100755 --- a/salt/common/tools/sbin/so-minion +++ b/salt/common/tools/sbin/so-minion @@ -5,6 +5,10 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +if [ -f /usr/sbin/so-common ]; then + . /usr/sbin/so-common +fi + if [ "$(id -u)" -ne 0 ]; then echo "This script must be run using sudo!" 
exit 1 @@ -171,6 +175,8 @@ function add_sensor_to_minion() { echo " config:" >> $PILLARFILE echo " af-packet:" >> $PILLARFILE echo " threads: '$CORECOUNT'" >> $PILLARFILE + echo "pcap:" >> $PILLARFILE + echo " enabled: True" >> $PILLARFILE } function createSTANDALONE() { @@ -209,10 +215,12 @@ function createIDHNODE() { } function testConnection() { - salt "$MINION_ID" test.ping + retry 5 10 "salt '$MINION_ID' test.ping" 0 local ret=$? if [[ $ret != 0 ]]; then echo "The Minion has been accepted but is not online. Try again later" + echo "Deleting the key" + deleteminion exit 1 fi } diff --git a/salt/common/tools/sbin/so-status b/salt/common/tools/sbin/so-status index 596070e4e..5e1487311 100644 --- a/salt/common/tools/sbin/so-status +++ b/salt/common/tools/sbin/so-status @@ -1,301 +1,165 @@ -#!/bin/bash +#!/usr/bin/env python3 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -if ! 
[ "$(id -u)" = 0 ]; then - echo "This command must be run as root" - exit 1 -fi +import json +import os +from rich import box +from rich.console import Console +from rich.table import Table +import subprocess +import sys +import time -display_help() { -cat < 2: + item = json.loads(line) + if item['Names'] == container: + details['Status'] = item['State'] + details['Details'] = item['Status'] + container_list.append(details) + exists = True + if item['State'] != "running": + code = 1 + break - mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#") + if not exists: + container_list.append(details) + code = 1 + return code, container_list -} -populate_container_lists() { - systemctl is-active --quiet docker +def check_status(options, console): + container_list = [] + code = check_system_status(options, console) + if code == 0: + code, container_list = check_container_status(options, console) + output(options, console, code, container_list) + return code - if [[ $? 
= 0 ]]; then - mapfile -t docker_raw_list < <(curl -s --unix-socket /var/run/docker.sock http:/v1.40/containers/json?all=1 \ - | jq -c '.[] | { Name: .Names[0], State: .State }' \ - | tr -d '/{"}') - else - exit 1 - fi - local container_name="" - local container_state="" +def main(): + options = [] + args = sys.argv[1:] + for option in args: + if option.startswith("-"): + options.append(option) + args.remove(option) - for line in "${docker_raw_list[@]}"; do - container_name="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\1/' )" # Get value in the first search group (container names) - container_state="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\2/' )" # Get value in the second search group (container states) + if len(args) != 0 or "-h" in options: + showUsage(options, None) - temp_container_name_list+=( "${container_name}" ) - temp_container_state_list+=( "${container_state}" ) - done + if os.environ["USER"] != "root": + fail("This program must be run as root") + + console = Console() + sys.exit(check_status(options, console)) - compare_lists -} -parse_status() { - local service_name=${1} - local container_state=${2} +if __name__ == "__main__": + main() - for state in "${GOOD_STATUSES[@]}"; do - [[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $SUCCESS_STRING && return 0 || [[ $container_state = "$state" ]] && return 0 - done - - for state in "${BAD_STATUSES[@]}"; do - [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && [[ $QUIET = "false" ]] && printf $DISABLED_STRING && return 0 || [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && return 0 - done - - # if a highstate has finished running since the system has started - # then the containers should be running so let's check the status - if [ $LAST_HIGHSTATE_END -ge $SYSTEM_START_TIME ]; then - - [[ $container_state = "missing" ]] && [[ $QUIET = "false" ]] && printf $MISSING_STRING && return 1 || [[ $container_state = "missing" ]] && [[ "$EXITCODE" -lt 2 ]] && 
EXITCODE=1 && return 1 - - for state in "${PENDING_STATUSES[@]}"; do - [[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $PENDING_STRING && return 0 - done - - # This is technically not needed since the default is error state - for state in "${BAD_STATUSES[@]}"; do - [[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ $container_state = "$state" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1 - done - - [[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1 - - # if a highstate has not run since system start time, but a highstate is currently running - # then show that the containers are STARTING - elif [[ "$HIGHSTATE_RUNNING" == 0 ]]; then - [[ $QUIET = "false" ]] && printf $STARTING_STRING && return 2 || EXITCODE=2 && return 2 - - # if a highstate has not finished running since system startup and isn't currently running - # then just show that the containers are WAIT_START; waiting to be started - else - [[ $QUIET = "false" ]] && printf $WAIT_START_STRING && return 2 || EXITCODE=2 && return 2 - - fi -} - -# {% raw %} - -print_line() { - local service_name=${1} - local service_state="$( parse_status ${1} ${2} )" - local columns=$(tput cols) - local state_color="\e[0m" - - local PADDING_CONSTANT=15 - - if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then - state_color="\e[1;31m" - if [[ "$EXITCODE" -eq 0 ]]; then - EXITCODE=1 - fi - elif [[ $service_state = "$SUCCESS_STRING" ]]; then - state_color="\e[1;32m" - elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then - state_color="\e[1;33m" - EXITCODE=2 - fi - - printf " $service_name " - for i in $(seq 0 $(( $columns - $PADDING_CONSTANT - ${#service_name} - ${#service_state} ))); do - printf 
"${state_color}%b\e[0m" "-" - done - printf " [ " - printf "${state_color}%b\e[0m" "$service_state" - printf "%s \n" " ]" -} - -non_term_print_line() { - local service_name=${1} - local service_state="$( parse_status ${1} ${2} )" - - if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then - if [[ "$EXITCODE" -eq 0 ]]; then - EXITCODE=1 - fi - elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then - EXITCODE=2 - fi - - printf " $service_name " - for i in $(seq 0 $(( 35 - ${#service_name} - ${#service_state} ))); do - printf "-" - done - printf " [ " - printf "$service_state" - printf "%s \n" " ]" -} - -main() { - - # if running from salt - if [ "$CALLER" == 'salt-call' ] || [ "$CALLER" == 'salt-minion' ]; then - printf "\n" - printf "Checking Docker status\n\n" - - systemctl is-active --quiet docker - if [[ $? = 0 ]]; then - non_term_print_line "Docker" "running" - else - non_term_print_line "Docker" "exited" - fi - - populate_container_lists - - printf "\n" - printf "Checking container statuses\n\n" - - local num_containers=${#container_name_list[@]} - - for i in $(seq 0 $(($num_containers - 1 ))); do - non_term_print_line ${container_name_list[$i]} ${container_state_list[$i]} - done - - printf "\n" - - # else if running from a terminal - else - - if [ "$QUIET" = true ]; then - if [ $SYSTEM_START_TIME -lt $LAST_SOSETUP_LOG ]; then - exit 99 - fi - print_or_parse="parse_status" - else - print_or_parse="print_line" - - local focus_color="\e[1;34m" - printf "\n" - printf "${focus_color}%b\e[0m" "Checking Docker status\n\n" - fi - - systemctl is-active --quiet docker - if [[ $? 
= 0 ]]; then - ${print_or_parse} "Docker" "running" - else - ${print_or_parse} "Docker" "exited" - fi - - populate_container_lists - - if [ "$QUIET" = false ]; then - printf "\n" - printf "${focus_color}%b\e[0m" "Checking container statuses\n\n" - fi - - local num_containers=${#container_name_list[@]} - - for i in $(seq 0 $(($num_containers - 1 ))); do - ${print_or_parse} ${container_name_list[$i]} ${container_state_list[$i]} - done - - if [ "$QUIET" = false ]; then - printf "\n" - fi - fi -} - -# {% endraw %} - -while getopts ':hq' OPTION; do - case "$OPTION" in - h) - display_help - exit 0 - ;; - q) - QUIET=true - ;; - \?) - display_help - exit 0 - ;; - esac -done - -main - -exit $EXITCODE \ No newline at end of file diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index bb22849e5..a14c03e2d 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -75,7 +75,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.apm_server@package" @@ -86,7 +86,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -109,7 +109,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.auditbeat@package" @@ -120,7 +120,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -143,7 +143,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.cloudbeat@package" @@ -154,7 +154,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -177,7 +177,7 @@ 
elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.endpoint_security@package" @@ -188,7 +188,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -211,7 +211,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.filebeat@package" @@ -222,7 +222,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -245,7 +245,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.fleet_server@package" @@ -256,7 +256,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -279,7 +279,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.heartbeat@package" @@ -290,7 +290,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -313,7 +313,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent@package" @@ -324,7 +324,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -347,7 +347,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.metricbeat@package" @@ -358,7 +358,7 @@ elasticsearch: _meta: package: name: 
elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -381,7 +381,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.osquerybeat@package" @@ -392,7 +392,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false @@ -415,7 +415,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true composed_of: - "so-logs-elastic_agent.packetbeat@package" @@ -426,7 +426,7 @@ elasticsearch: _meta: package: name: elastic_agent - managed_by: fleet + managed_by: security_onion managed: true data_stream: hidden: false diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index bc33598f3..be28a3712 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -346,6 +346,15 @@ append_so-elasticsearch_so-status.conf: - name: /opt/so/conf/so-status/so-status.conf - text: so-elasticsearch +so-es-cluster-settings: + cmd.run: + - name: /usr/sbin/so-elasticsearch-cluster-settings + - cwd: /opt/so + - template: jinja + - require: + - docker_container: so-elasticsearch + - file: es_sync_scripts + so-elasticsearch-templates: cmd.run: - name: /usr/sbin/so-elasticsearch-templates-load diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-cluster-settings b/salt/elasticsearch/tools/sbin/so-elasticsearch-cluster-settings new file mode 100755 index 000000000..a72ccaf56 --- /dev/null +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-cluster-settings @@ -0,0 +1,36 @@ +#!/bin/bash +{% set ES = salt['pillar.get']('manager:mainip', '') %} +{% set MANAGER = salt['grains.get']('master') %} + +ELASTICSEARCH_PORT=9200 + +# Wait for ElasticSearch to come up, so that we can query for version infromation +echo -n "Waiting for ElasticSearch..." 
+COUNT=0 +ELASTICSEARCH_CONNECTED="no" +while [[ "$COUNT" -le 30 ]]; do + curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://localhost:"$ELASTICSEARCH_PORT" + if [ $? -eq 0 ]; then + ELASTICSEARCH_CONNECTED="yes" + echo "connected!" + break + else + ((COUNT+=1)) + sleep 1 + echo -n "." + fi +done +if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then + echo + echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'docker ps' \n -running 'sudo so-elastic-restart'" + echo + + exit +fi + +# Check to see if config already exists; jq prints the string "null" when the key is absent +CLUSTER_SETTINGS=$(so-elasticsearch-query _cluster/settings | jq .persistent.cluster.remote) +if [[ "$CLUSTER_SETTINGS" == "null" ]]; then + echo "Applying cross cluster search config..." + so-elasticsearch-query _cluster/settings -d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" -XPUT +fi diff --git a/salt/firewall/hostgroups/analyst b/salt/firewall/hostgroups/analyst new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/analyst_workstations b/salt/firewall/hostgroups/analyst_workstations new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/eval b/salt/firewall/hostgroups/eval new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/heavynodes b/salt/firewall/hostgroups/heavynodes new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/idh b/salt/firewall/hostgroups/idh new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/manager b/salt/firewall/hostgroups/manager new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/receivers b/salt/firewall/hostgroups/receivers new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/searchnodes
b/salt/firewall/hostgroups/searchnodes new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/sensors b/salt/firewall/hostgroups/sensors new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/hostgroups/standalone b/salt/firewall/hostgroups/standalone new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/analyst b/salt/firewall/portgroups/analyst new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/analyst_workstations b/salt/firewall/portgroups/analyst_workstations new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/eval b/salt/firewall/portgroups/eval new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/heavynodes b/salt/firewall/portgroups/heavynodes new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/idh b/salt/firewall/portgroups/idh new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/manager b/salt/firewall/portgroups/manager new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/receivers b/salt/firewall/portgroups/receivers new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/searchnodes b/salt/firewall/portgroups/searchnodes new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/sensors b/salt/firewall/portgroups/sensors new file mode 100644 index 000000000..e69de29bb diff --git a/salt/firewall/portgroups/standalone b/salt/firewall/portgroups/standalone new file mode 100644 index 000000000..ea8f495f9 --- /dev/null +++ b/salt/firewall/portgroups/standalone @@ -0,0 +1,19 @@ +playbook +mysql +kibana +redis +influxdb +elasticsearch_rest +elasticsearch_node +docker_registry +yum +sensoroni +beats_5044 +beats_5644 +elastic_agent_control +elastic_agent_data +elasticsearch_rest +endgame +strelka_frontend +syslog +nginx \ No 
newline at end of file diff --git a/salt/firewall/ports/ports.yaml b/salt/firewall/ports/ports.yaml new file mode 100644 index 000000000..d26b373cb --- /dev/null +++ b/salt/firewall/ports/ports.yaml @@ -0,0 +1,78 @@ +firewall: + ports: + all: + tcp: + - '0:65535' + udp: + - '0:65535' + agrules: + tcp: + - 7788 + beats_5044: + tcp: + - 5044 + beats_5644: + tcp: + - 5644 + beats_5066: + tcp: + - 5066 + docker_registry: + tcp: + - 5000 + elasticsearch_node: + tcp: + - 9300 + elasticsearch_rest: + tcp: + - 9200 + elastic_agent_control: + tcp: + - 8220 + elastic_agent_data: + tcp: + - 5055 + endgame: + tcp: + - 3765 + influxdb: + tcp: + - 8086 + kibana: + tcp: + - 5601 + mysql: + tcp: + - 3306 + nginx: + tcp: + - 80 + - 443 + playbook: + tcp: + - 3200 + redis: + tcp: + - 6379 + - 9696 + salt_manager: + tcp: + - 4505 + - 4506 + sensoroni: + tcp: + - 443 + ssh: + tcp: + - 22 + strelka_frontend: + tcp: + - 57314 + syslog: + tcp: + - 514 + udp: + - 514 + yum: + tcp: + - 443 diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml new file mode 100644 index 000000000..02199bc79 --- /dev/null +++ b/salt/firewall/soc_firewall.yaml @@ -0,0 +1,77 @@ +firewall: + hostgroups: + analyst_workstations: + description: List of IP Addresses or CIDR blocks to allow analyst workstations. + file: True + global: True + title: Analyst Workstations + analyst: + description: List of IP Addresses or CIDR blocks to allow analyst connections. + file: True + global: True + title: Analysts + standalone: + description: List of IP Addresses or CIDR blocks to allow standalone connections. + file: True + global: True + title: Standalone + advanced: True + eval: + description: List of IP Addresses or CIDR blocks to allow eval connections. + file: True + global: True + title: Eval + advanced: True + idh: + description: List of IP Addresses or CIDR blocks to allow idh connections. 
+ file: True + global: True + title: IDH Nodes + manager: + description: List of IP Addresses or CIDR blocks to allow manager connections. + file: True + global: True + title: Manager + advanced: True + heavynodes: + description: List of IP Addresses or CIDR blocks to allow heavynode connections. + file: True + global: True + title: Heavy Nodes + searchnodes: + description: List of IP Addresses or CIDR blocks to allow searchnode connections. + file: True + global: True + title: Search Nodes + sensors: + description: List of IP Addresses or CIDR blocks to allow Sensor connections. + file: True + global: True + title: Sensors + receivers: + description: List of IP Addresses or CIDR blocks to allow receiver connections. + file: True + global: True + title: Receivers + portgroups: + analyst: + description: List of ports for use with Analyst connections. + file: True + global: True + title: Analyst Ports + analyst_workstations: + description: List of ports for use with analyst workstations. + file: True + global: True + title: Analyst Workstation Ports + standalone: + description: List of ports for use with Standalone. + file: True + global: True + title: Standalone + ports: + ports__yaml: + description: Ports in YAML. + file: True + global: True + title: Ports \ No newline at end of file diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls index fa08125f5..d99ef17c6 100644 --- a/salt/idstools/init.sls +++ b/salt/idstools/init.sls @@ -2,14 +2,10 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
- +{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} -{% set VERSION = salt['pillar.get']('global:soversion') %} -{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} -{% set MANAGER = salt['grains.get']('master') %} -{% set ENGINE = salt['pillar.get']('global:mdengine') %} {% set proxy = salt['pillar.get']('manager:proxy') %} include: @@ -33,7 +29,7 @@ so-ruleupdatecron: so-idstools: docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }} + - image: {{ GLOBALS.manager }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }} - hostname: so-idstools - user: socore {% if proxy %} diff --git a/salt/idstools/rules/extraction.rules b/salt/idstools/rules/extraction.rules new file mode 100644 index 000000000..bccfc69d6 --- /dev/null +++ b/salt/idstools/rules/extraction.rules @@ -0,0 +1,26 @@ +# Extract all PDF mime type +alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100000; rev:1;) +alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100001; rev:1;) +alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100002; rev:1;) +alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100003; rev:1;) +# Extract EXE/DLL file types +alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100004; rev:1;) +alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100005; rev:1;) +alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100006; rev:1;) +alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100007; rev:1;) +alert http any any -> any any (msg:"FILE EXE detected"; 
filemagic:"MS-DOS executable"; filestore; sid:1100008; rev:1;) +alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100009; rev:1;) +alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100010; rev:1;) +alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100011; rev:1;) + +# Extract all Zip files +alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100012; rev:1;) +alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100013; rev:1;) +alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100014; rev:1;) +alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100015; rev:1;) + +# Extract Word Docs +alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100016; rev:1;) +alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100017; rev:1;) +alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100018; rev:1;) +alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100019; rev:1;) \ No newline at end of file diff --git a/salt/idstools/rules/filters.rules b/salt/idstools/rules/filters.rules new file mode 100644 index 000000000..051d1913f --- /dev/null +++ b/salt/idstools/rules/filters.rules @@ -0,0 +1,11 @@ +# Start the filters at sid 1200000 +# Example of filtering out *google.com from being in the dns log. +#config dns any any -> any any (dns.query; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200000;) +# Example of filtering out *google.com from being in the http log. 
+#config http any any -> any any (http.host; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200001;) +# Example of filtering out someuseragent from being in the http log. +#config http any any -> any any (http.user_agent; content:"someuseragent"; config: logging disable, type tx, scope tx; sid:1200002;) +# Example of filtering out Google's certificate from being in the ssl log. +#config tls any any -> any any (tls.fingerprint; content:"4f:a4:5e:58:7e:d9:db:20:09:d7:b6:c7:ff:58:c4:7b:dc:3f:55:b4"; config: logging disable, type tx, scope tx; sid:1200003;) +# Example of filtering out a md5 of a file from being in the files log. +#config fileinfo any any -> any any (fileinfo.filemd5; content:"7a125dc69c82d5caf94d3913eecde4b5"; config: logging disable, type tx, scope tx; sid:1200004;) diff --git a/salt/idstools/rules/local.rules b/salt/idstools/rules/local.rules new file mode 100644 index 000000000..ac11dfa58 --- /dev/null +++ b/salt/idstools/rules/local.rules @@ -0,0 +1 @@ +# Add your custom Suricata rules in this file. \ No newline at end of file diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index 9b062c300..9f1867bb7 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -18,4 +18,23 @@ idstools: global: True modify: description: List of SIDS that are modified. - global: True \ No newline at end of file + global: True + rules: + local__rules: + description: This is where custom Suricata rules are entered. + file: True + global: True + advanced: True + title: Local Rules + filters__rules: + description: You can set custom filters for Suricata when using it for meta data creation. + file: True + global: True + advanced: True + title: Filter Rules + extraction__rules: + description: This is a list of mime types for file extraction when Suricata is used for meta data creation. 
+ file: True + global: True + advanced: True + title: Extraction Rules \ No newline at end of file diff --git a/salt/logstash/pipelines/config/so/9101_output_osquery_livequery.conf.jinja b/salt/logstash/pipelines/config/so/9101_output_osquery_livequery.conf.jinja deleted file mode 100644 index 8d661b8cc..000000000 --- a/salt/logstash/pipelines/config/so/9101_output_osquery_livequery.conf.jinja +++ /dev/null @@ -1,37 +0,0 @@ -{%- set ES = salt['grains.get']('master') -%} -{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} -{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} - -filter { - if [type] =~ "live_query" { - - mutate { - rename => { - "[host][hostname]" => "computer_name" - } - } - - prune { - blacklist_names => ["host"] - } - - split { - field => "rows" - } - } -} - - -output { - if [type] =~ "live_query" { - elasticsearch { - pipeline => "osquery.live_query" - hosts => "{{ ES }}" - user => "{{ ES_USER }}" - password => "{{ ES_PASS }}" - index => "so-osquery" - ssl => true - ssl_certificate_verification => false - } - } -} diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 8979535e8..623dae701 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -55,6 +55,7 @@ http { ssl_session_cache shared:SSL:1m; ssl_session_timeout 10m; ssl_ciphers HIGH:!aNULL:!MD5; + ssl_ecdh_curve secp521r1:secp384r1; ssl_prefer_server_ciphers on; ssl_protocols TLSv1.2; } diff --git a/salt/pcap/config.map.jinja b/salt/pcap/config.map.jinja new file mode 100644 index 000000000..f335c9380 --- /dev/null +++ b/salt/pcap/config.map.jinja @@ -0,0 +1,3 @@ +{% import_yaml 'pcap/defaults.yaml' as pcap_defaults with context %} +{% set pcap_pillar = pillar.pcap %} +{% set PCAPMERGED = salt['defaults.merge'](pcap_defaults, pcap_pillar, in_place=False) %} diff --git a/salt/pcap/files/config b/salt/pcap/files/config deleted file mode 100644 index 24f9a579e..000000000 --- 
a/salt/pcap/files/config +++ /dev/null @@ -1,23 +0,0 @@ -{%- set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} -{%- set DISKFREEPERCENTAGE = salt['pillar.get']('steno:diskfreepercentage', 10) %} -{%- set MAXFILES = salt['pillar.get']('steno:maxfiles', 30000) %} -{%- set BLOCKS = salt['pillar.get']('steno:blocks', 2048) %} -{%- set FILEMB = salt['pillar.get']('steno:filemb', 4096) %} -{%- set AIOPS = salt['pillar.get']('steno:aiops', 128) %} -{%- set THREADS = salt['pillar.get']('steno:threads', 1) %} -{ - "Threads": [ - { "PacketsDirectory": "/nsm/pcap", "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ MAXFILES }}, "DiskFreePercentage": {{ DISKFREEPERCENTAGE }} } - {%- if THREADS > 1 %} - {%- for i in range(2,THREADS+1) %} - , { "PacketsDirectory": "/nsm/pcap" , "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ MAXFILES }}, "DiskFreePercentage": {{ DISKFREEPERCENTAGE }} } - {%- endfor %} - {%- endif %} - ] - , "StenotypePath": "/usr/bin/stenotype" - , "Interface": "{{ INTERFACE }}" - , "Port": 1234 - , "Host": "127.0.0.1" - , "Flags": ["-v", "--blocks={{ BLOCKS }}", "--preallocate_file_mb={{ FILEMB }}", "--aiops={{ AIOPS }}", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}] - , "CertPath": "/etc/stenographer/certs" -} diff --git a/salt/pcap/files/config.jinja b/salt/pcap/files/config.jinja new file mode 100644 index 000000000..420d12639 --- /dev/null +++ b/salt/pcap/files/config.jinja @@ -0,0 +1,11 @@ +{ + "Threads": [ + { "PacketsDirectory": "/nsm/pcap", "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ PCAPMERGED.pcap.config.maxdirectoryfiles }}, "DiskFreePercentage": {{ PCAPMERGED.pcap.config.diskfreepercentage }} } + ] + , "StenotypePath": "/usr/bin/stenotype" + , "Interface": "{{ pillar.sensor.interface }}" + , "Port": 1234 + , "Host": "127.0.0.1" + , "Flags": ["-v", "--blocks={{ PCAPMERGED.pcap.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.pcap.config.preallocate_file_mb }}", "--aiops={{ 
PCAPMERGED.pcap.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}] + , "CertPath": "/etc/stenographer/certs" +} diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 2d047e731..a5fd5da68 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -7,12 +7,13 @@ {% if sls in allowed_states %} {% from "pcap/map.jinja" import STENOOPTIONS with context %} +{% from "pcap/config.map.jinja" import PCAPMERGED with context %} {% set VERSION = salt['pillar.get']('global:soversion') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} -{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} +{% set BPF_STENO = salt['pillar.get']('bpf:pcap', None) %} {% set BPF_COMPILED = "" %} # PCAP Section @@ -52,12 +53,13 @@ bpfcompilationfailure: stenoconf: file.managed: - name: /opt/so/conf/steno/config - - source: salt://pcap/files/config + - source: salt://pcap/files/config.jinja - user: stenographer - group: stenographer - mode: 644 - template: jinja - defaults: + PCAPMERGED: {{ PCAPMERGED }} BPF_COMPILED: "{{ BPF_COMPILED }}" stenoca: diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 2fe385de5..4b545f5e0 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -1,10 +1,6 @@ -{%- set URLBASE = salt['pillar.get']('global:url_base') %} -{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description', '') %} -{%- set MODEL = salt['grains.get']('sosmodel', '') %} -{%- set ADDRESS = salt['pillar.get']('sensoroni:node_address') %} +{%- from 'vars/globals.map.jinja' import GLOBALS -%} {%- set ANALYZE_TIMEOUT_MS = salt['pillar.get']('sensoroni:analyze_timeout_ms', 900000) %} {%- set ANALYZE_PARALLEL_LIMIT = salt['pillar.get']('sensoroni:analyze_parallel_limit', 5) %} -{%- set SENSORONIKEY = salt['pillar.get']('sensoroni:sensoronikey', '') %} 
{%- set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) %} {%- set ROLE = grains.id.split('_') | last %} {%- if ROLE in ['eval', 'standalone', 'sensor', 'heavynode'] %} @@ -23,13 +19,13 @@ "logFilename": "/opt/sensoroni/logs/sensoroni.log", "logLevel":"info", "agent": { - "nodeId": "{{ grains.host | lower }}", - "role": "{{ grains.role }}", - "description": {{ DESCRIPTION | tojson }}, - "address": "{{ ADDRESS }}", - "model": "{{ MODEL }}", + "nodeId": "{{ GLOBALS.hostname | lower }}", + "role": "{{ GLOBALS.role }}", + "description": {{ GLOBALS.description | tojson }}, + "address": "{{ GLOBALS.node_ip }}", + "model": "{{ GLOBALS.so_model }}", "pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }}, - "serverUrl": "https://{{ URLBASE }}/sensoroniagents", + "serverUrl": "https://{{ GLOBALS.url_base }}/sensoroniagents", "verifyCert": false, "modules": { {%- if ANALYZEENABLED %} @@ -40,7 +36,7 @@ {%- endif %} "importer": {}, "statickeyauth": { - "apiKey": "{{ SENSORONIKEY }}" + "apiKey": "{{ GLOBALS.sensoroni_key }}" {%- if STENOENABLED %} }, "stenoquery": { diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls index 6d49d33ab..7e0aaa9aa 100644 --- a/salt/sensoroni/init.sls +++ b/salt/sensoroni/init.sls @@ -1,6 +1,4 @@ -{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} -{% set MANAGER = salt['grains.get']('master') %} +{% from 'vars/globals.map.jinja' import GLOBALS %} sensoroniconfdir: file.directory: @@ -43,7 +41,7 @@ analyzerscripts: so-sensoroni: docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }} + - image: {{ GLOBALS.manager }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }} - network_mode: host - binds: - /opt/so/conf/steno/certs:/etc/stenographer/certs:rw diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 57afc11e3..f16f5da87 100644 --- 
a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -6,11 +6,13 @@ soc: description: Customize the login page with a specific markdown-formatted message. file: True global: True + syntax: md motd__md: title: Overview Page description: Customize the overview page with specific markdown-formatted content. Images can be used but must be hosted from another host that is accessible by the users' browser. file: True global: True + syntax: md custom__js: title: Custom Javascript description: Customize SOC UI behavior with custom Javascript code. Custom Javascript not provided by Security Onion Solutions is unsupported, and should be removed prior to requesting support and prior to performing upgrades. diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index a46f7425b..5f628cbdd 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -12,7 +12,7 @@ {% set VERSION = salt['pillar.get']('global:soversion') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set BPF_NIDS = salt['pillar.get']('nids:bpf') %} +{% set BPF_NIDS = salt['pillar.get']('bpf:suricata', None) %} {% set BPF_STATUS = 0 %} {# import_yaml 'suricata/files/defaults2.yaml' as suricata #} diff --git a/salt/telegraf/scripts/sostatus.sh b/salt/telegraf/scripts/sostatus.sh index 567e6b027..c30220fff 100644 --- a/salt/telegraf/scripts/sostatus.sh +++ b/salt/telegraf/scripts/sostatus.sh @@ -11,10 +11,11 @@ if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then SOSTATUSLOG=/var/log/sostatus/status.log - SOSTATUSSTATUS=$(cat /var/log/sostatus/status.log) + SOSTATUSCODE=$(jq -r .status_code $SOSTATUSLOG) + SOSTATUSJSON=$(cat $SOSTATUSLOG | sed 's/"/\\"/g') if [ -f "$SOSTATUSLOG" ]; then - echo "sostatus status=$SOSTATUSSTATUS" + echo "sostatus status=$SOSTATUSCODE,json=\"$SOSTATUSJSON\"" else exit 0 fi diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index 9a6dd7f33..487cdf781 100644 --- a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -20,7 +20,10 @@ 'pipeline': INIT.PILLAR.global.pipeline, 'so_version': INIT.PILLAR.global.soversion, 'url_base': INIT.PILLAR.global.url_base, + 'so_model': INIT.GRAINS.get('sosmodel',''), + 'description': INIT.PILLAR.sensoroni.get('node_description',''), 'docker_range': INIT.PILLAR.docker.range, + 'sensoroni_key': INIT.PILLAR.sensoroni.sensoronikey, 'application_urls': {}, 'manager_roles': [ 'so-eval', diff --git a/salt/zeek/policy/custom/filters/conn b/salt/zeek/policy/custom/filters/conn new file mode 100644 index 000000000..e9181cc1e --- /dev/null +++ b/salt/zeek/policy/custom/filters/conn @@ -0,0 +1,19 @@ +module Filterconn; + +export { + global ignore_services: set[string] = {"dns", "krb", "krb_tcp"}; + } + +hook Conn::log_policy(rec: Conn::Info, id: Log::ID, filter: Log::Filter) + { + # Record only connections not in the ignored services + if ( ! rec?$service || rec$service in ignore_services ) + break; + } + +event zeek_init() +{ + Log::remove_default_filter(Conn::LOG); + local filter: Log::Filter = [$name="conn-filter"]; + Log::add_filter(Conn::LOG, filter); +} \ No newline at end of file diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index adb534281..a48ec20dc 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -17,6 +17,13 @@ zeek: CompressLogs: description: Enable compression of zeek logs. 
If you are seeing packet loss at the top of the hour in zeek or pcap you might need to set this to 0. This will use more disk space but save IO and CPU. policy: + custom: + filters: + conn: + description: Conn Filter for Zeek. This is an advanced setting and will take further action to enable. + file: True + global: True + advanced: True file_extraction: description: This is a list of mime types Zeek will extract from the network streams. load: diff --git a/setup/so-functions b/setup/so-functions index 7bba91092..20340436e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1810,7 +1810,7 @@ drop_install_options() { # Drop the install Variable echo "MAINIP=$MAINIP" > /opt/so/install.txt echo "MNIC=$MNIC" >> /opt/so/install.txt - echo "NODE_DESCRIPTION=$NODE_DESCRIPTION" >> /opt/so/install.txt + echo "NODE_DESCRIPTION='$NODE_DESCRIPTION'" >> /opt/so/install.txt echo "ES_HEAP_SIZE=$ES_HEAP_SIZE" >> /opt/so/install.txt echo "PATCHSCHEDULENAME=$PATCHSCHEDULENAME" >> /opt/so/install.txt echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt diff --git a/setup/so-variables b/setup/so-variables index 214fa6b6f..5acbc01bc 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -81,7 +81,7 @@ export whiptail_title mkdir -p $local_salt_dir/pillar/minions -for THEDIR in elasticsearch firewall redis backup strelka sensoroni curator soc soctopus docker zeek suricata nginx filebeat logstash soc manager kratos idstools idh elastalert +for THEDIR in bpf pcap elasticsearch firewall redis backup strelka sensoroni curator soc soctopus docker zeek suricata nginx filebeat logstash soc manager kratos idstools idh elastalert do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls