Merge pull request #8310 from Security-Onion-Solutions/dev

2.3.140
This commit is contained in:
Mike Reeves
2022-07-18 11:23:54 -04:00
committed by GitHub
55 changed files with 452 additions and 176 deletions

.github/workflows/contrib.yml
View File

@@ -0,0 +1,24 @@
name: contrib
on:
issue_comment:
types: [created]
pull_request_target:
types: [opened,closed,synchronize]
jobs:
CLAssistant:
runs-on: ubuntu-latest
steps:
- name: "Contributor Check"
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
uses: cla-assistant/github-action@v2.1.3-beta
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PERSONAL_ACCESS_TOKEN : ${{ secrets.PERSONAL_ACCESS_TOKEN }}
with:
path-to-signatures: 'signatures_v1.json'
path-to-document: 'https://securityonionsolutions.com/cla'
allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,weslambert,defensivedepth,m0duspwnens
remote-organization-name: Security-Onion-Solutions
remote-repository-name: licensing

View File

@@ -12,6 +12,6 @@ jobs:
fetch-depth: '0'
- name: Gitleaks
uses: zricethezav/gitleaks-action@master
uses: gitleaks/gitleaks-action@v1.6.0
with:
config-path: .github/.gitleaks.toml

View File

@@ -1,6 +1,6 @@
## Security Onion 2.3.130
## Security Onion 2.3.140
Security Onion 2.3.130 is here!
Security Onion 2.3.140 is here!
## Screenshots

View File

@@ -1,18 +1,18 @@
### 2.3.130-20220607 ISO image built on 2022/06/07
### 2.3.140-20220718 ISO image built on 2022/07/18
### Download and Verify
2.3.130-20220607 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso
2.3.140-20220718 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.140-20220718.iso
MD5: 0034D6A9461C04357AFF512875408A4C
SHA1: BF80EEB101C583153CAD8E185A7DB3173FD5FFE8
SHA256: 15943623B96D8BB4A204A78668447F36B54A63ABA5F8467FBDF0B25C5E4E6078
MD5: 9570065548DBFA6230F28FF623A8B61A
SHA1: D48B2CC81DF459C3EBBC0C54BD9AAFAB4327CB75
SHA256: 0E31E15EDFD3392B9569FCCAF1E4518432ECB0D7A174CCA745F2F22CDAC4A034
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.130-20220607.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.140-20220718.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.130-20220607.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.140-20220718.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.140-20220718.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.130-20220607.iso.sig securityonion-2.3.130-20220607.iso
gpg --verify securityonion-2.3.140-20220718.iso.sig securityonion-2.3.140-20220718.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 07 Jun 2022 01:27:20 PM EDT using RSA key ID FE507013
gpg: Signature made Mon 18 Jul 2022 10:16:05 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
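If the signing key has not been imported yet, `gpg --verify` will report that the public key is missing. A minimal sketch of importing it first, using the KEYS file linked above:
```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
gpg --import KEYS
```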

View File

@@ -1 +1 @@
2.3.130
2.3.140

View File

@@ -14,4 +14,5 @@ logstash:
- so/9700_output_strelka.conf.jinja
- so/9800_output_logscan.conf.jinja
- so/9801_output_rita.conf.jinja
- so/9802_output_kratos.conf.jinja
- so/9900_output_endgame.conf.jinja

View File

@@ -29,7 +29,7 @@ fi
interface="$1"
shift
sudo tcpdump -i $interface -ddd $@ | tail -n+2 |
tcpdump -i $interface -ddd $@ | tail -n+2 |
while read line; do
cols=( $line )
printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}

View File

@@ -49,19 +49,18 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
fi
echo "Testing to see if the pipelines are already applied"
ESVER=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-suricata-eve-pipeline | jq . | wc -c)
PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-elasticsearch-server-pipeline | jq . | wc -c)
if [[ "$PIPELINES" -lt 5 ]]; then
if [[ "$PIPELINES" -lt 5 ]] || [ "$2" != "--force" ]; then
echo "Setting up ingest pipeline(s)"
for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system threatintel tomcat traefik zeek zscaler
do
echo "Loading $MODULE"
docker exec -i so-filebeat filebeat setup modules -pipelines -modules $MODULE -c $FB_MODULE_YML
sleep 2
done
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
{%- for module in MODULESMERGED.modules.keys() %}
{%- for fileset in MODULESMERGED.modules[module] %}
echo "{{ module }}.{{ fileset}}"
docker exec -i so-filebeat filebeat setup --pipelines --modules {{ module }} -M "{{ module }}.{{ fileset }}.enabled=true" -c $FB_MODULE_YML
sleep 0.5
{% endfor %}
{%- endfor %}
else
exit 0
fi
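After Jinja rendering, each iteration of the new loop expands to one concrete setup call per enabled fileset. For example, assuming the merged module map contains a `suricata` module with an `eve` fileset (names illustrative), the rendered script would contain:
```
echo "suricata.eve"
docker exec -i so-filebeat filebeat setup --pipelines --modules suricata -M "suricata.eve.enabled=true" -c $FB_MODULE_YML
sleep 0.5
```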

View File

@@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import subprocess
import sys
import time
@@ -26,6 +27,7 @@ hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yam
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
defaultPortgroupsFilename = "/opt/so/saltstack/default/salt/firewall/portgroups.yaml"
supportedProtocols = ['tcp', 'udp']
readonly = False
def showUsage(options, args):
print('Usage: {} [OPTIONS] <COMMAND> [ARGS...]'.format(sys.argv[0]))
@@ -70,10 +72,26 @@ def checkApplyOption(options):
return apply(None, None)
def loadYaml(filename):
global readonly
file = open(filename, "r")
return yaml.safe_load(file.read())
content = file.read()
# Remove Jinja templating (for read-only operations)
if "{%" in content or "{{" in content:
content = content.replace("{{ ssh_port }}", "22")
pattern = r'.*({%|{{|}}|%}).*'
content = re.sub(pattern, "", content)
readonly = True
return yaml.safe_load(content)
def writeYaml(filename, content):
global readonly
if readonly:
raise Exception("Cannot write yaml file that has been flagged as read-only")
file = open(filename, "w")
return yaml.dump(content, file)
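For context, a hypothetical usage sketch of the patched helpers: loading a Jinja-templated YAML file strips the template markup and flips the module-level `readonly` flag, so a later write is refused rather than clobbering the template.
```python
# Hypothetical example; the paths are the constants defined earlier in the script.
config = loadYaml(defaultPortgroupsFilename)  # '{{ ssh_port }}' parsed as 22; readonly is now True
print(config)
writeYaml(portgroupsFilename, config)         # raises: "Cannot write yaml file that has been flagged as read-only"
```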

View File

@@ -1,6 +1,7 @@
#!/bin/bash
. /usr/sbin/so-common
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"
wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "{{ ELASTICCURL }}"
## This hackery will be removed if using Elastic Auth ##
# Let's snag a cookie from Kibana
@@ -12,6 +13,6 @@ echo "Setting up default Space:"
{% if HIGHLANDER %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
{% else %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet","fleetv2","securitySolutionCases"]} ' >> /opt/so/log/kibana/misc.log
{% endif %}
echo

View File

@@ -238,7 +238,7 @@ function syncElastic() {
syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "kibana_system" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
@@ -437,7 +437,7 @@ function updateStatus() {
state="inactive"
fi
body="{ \"schema_id\": \"$schemaId\", \"state\": \"$state\", \"traits\": $traitBlock }"
response=$(curl -fSsL -XPUT "${kratosUrl}/identities/$identityId" -d "$body")
response=$(curl -fSsL -XPUT -H "Content-Type: application/json" "${kratosUrl}/identities/$identityId" -d "$body")
[[ $? != 0 ]] && fail "Unable to update user"
}

View File

@@ -48,7 +48,7 @@ fi
{% else %}
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com)
clone_dir="/tmp"
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then

View File

@@ -377,6 +377,35 @@ enable_highstate() {
echo ""
}
es_version_check() {
CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}')
if [ "$CHECK_ES" -lt "110" ]; then
echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher."
echo ""
echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:"
echo "sudo BRANCH=2.3.130-20220607 soup"
echo ""
echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso."
echo ""
echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***"
exit 1
fi
}
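For reference, the `awk` expression isolates the patch component of the installed version string, which the guard then compares against 110. A quick illustration:
```
$ echo "2.3.100" | awk -F. '{print $3}'
100
```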
es_indices_check() {
echo "Checking for unsupported Elasticsearch indices..."
UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human |grep '"created_string":"6' | jq -r 'keys'[0]; done)
if [ -z "$UNSUPPORTED_INDICES" ]; then
echo "No unsupported indices found."
else
echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see https://docs.securityonion.net/en/2.3/soup.html#elastic-8 for more details."
echo
echo "$UNSUPPORTED_INDICES"
exit 1
fi
}
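To spot-check a single index by hand, something along these lines works (index name illustrative; the `?human` flag is what adds the `created_string` field the grep above matches on):
```
so-elasticsearch-query so-zeek-2019.05.01/_settings?human | jq '.[].settings.index.version.created_string'
```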
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
@@ -422,8 +451,9 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
[[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
[[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110
[[ "$INSTALLEDVERISON" == 2.3.110 ]] && up_to_2.3.120
[[ "$INSTALLEDVERISON" == 2.3.120 ]] && up_to_2.3.130
[[ "$INSTALLEDVERSION" == 2.3.110 ]] && up_to_2.3.120
[[ "$INSTALLEDVERSION" == 2.3.120 ]] && up_to_2.3.130
[[ "$INSTALLEDVERSION" == 2.3.130 ]] && up_to_2.3.140
true
}
@@ -439,6 +469,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
[[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
[[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130
[[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140
true
@@ -515,6 +546,14 @@ post_to_2.3.130() {
POSTVERSION=2.3.130
}
post_to_2.3.140() {
echo "Post Processing for 2.3.140"
FORCE_SYNC=true so-user sync
so-kibana-restart
so-kibana-space-defaults
POSTVERSION=2.3.140
}
stop_salt_master() {
@@ -762,10 +801,13 @@ up_to_2.3.100() {
echo "Adding receiver to assigned_hostgroups.local.map.yaml"
grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml
INSTALLEDVERSION=2.3.100
}
up_to_2.3.110() {
sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
INSTALLEDVERSION=2.3.110
}
up_to_2.3.120() {
@@ -773,11 +815,52 @@ up_to_2.3.120() {
so-thehive-stop
so-thehive-es-stop
so-cortex-stop
INSTALLEDVERSION=2.3.120
}
up_to_2.3.130() {
# Remove file for nav update
rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
INSTALLEDVERSION=2.3.130
}
up_to_2.3.140() {
## Deleting Elastalert indices to prevent issues with upgrade to Elastic 8 ##
echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
# Delete Elastalert indices
for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do so-elasticsearch-query $i -XDELETE; done
# Check to ensure Elastalert indices have been deleted
RESPONSE=$(so-elasticsearch-query elastalert*)
if [[ "$RESPONSE" == "{}" ]]; then
echo "Elastalert indices have been deleted."
else
fail "Something went wrong. Could not delete the Elastalert indices. Exiting."
fi
##
INSTALLEDVERSION=2.3.140
}
verify_upgradespace() {
@@ -958,7 +1041,7 @@ update_repo() {
fi
rm -f /etc/apt/sources.list.d/salt.list
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt $OSVER main" > /etc/apt/sources.list.d/saltstack.list
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list
apt-get update
fi
}
@@ -1093,6 +1176,8 @@ main() {
fi
echo "Verifying we have the latest soup script."
verify_latest_update_script
es_version_check
es_indices_check
echo ""
set_palette
check_elastic_license

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-kratos:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close kratos indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-kratos.*|so-kratos.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kratos:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete kratos indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-kratos.*|so-kratos.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kratos:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-kratos
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -23,22 +23,21 @@ read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
/usr/sbin/so-curator-closed-delete > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kibana-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1;
{% for INDEX in THIRDPARTY.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1;
{% endfor -%}
{% for INDEX in SO.modules.keys() -%}
{% for INDEX in MODULESMERGED.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
{% endfor -%}

View File

@@ -24,21 +24,18 @@ read lastPID < $lf
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1;
{% for INDEX in THIRDPARTY.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1;
{% endfor -%}
{% for INDEX in SO.modules.keys() -%}
{% for INDEX in MODULESMERGED.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
{% endfor -%}

View File

@@ -24,21 +24,18 @@ read lastPID < $lf
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-delete.yml > /dev/null 2>&1;
{% for INDEX in THIRDPARTY.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-delete.yml > /dev/null 2>&1;
{% endfor -%}
{% for INDEX in SO.modules.keys() -%}
{% for INDEX in MODULESMERGED.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-delete.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
{% endfor -%}

View File

@@ -24,21 +24,18 @@ read lastPID < $lf
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-warm.yml > /dev/null 2>&1;
{% for INDEX in THIRDPARTY.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-warm.yml > /dev/null 2>&1;
{% endfor -%}
{% for INDEX in SO.modules.keys() -%}
{% for INDEX in MODULESMERGED.modules.keys() -%}
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-warm.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
{% endfor -%}

View File

@@ -201,8 +201,8 @@ so-curatorclusterclose:
cron.present:
- name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
- user: root
- minute: '2'
- hour: '*/1'
- minute: '5'
- hour: '1'
- daymonth: '*'
- month: '*'
- dayweek: '*'
@@ -211,8 +211,8 @@ so-curatorclusterdelete:
cron.present:
- name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-delete.log 2>&1
- user: root
- minute: '2'
- hour: '*/1'
- minute: '5'
- hour: '1'
- daymonth: '*'
- month: '*'
- dayweek: '*'
@@ -221,8 +221,8 @@ so-curatorclusterwarm:
cron.present:
- name: /usr/sbin/so-curator-cluster-warm > /opt/so/log/curator/cron-warm.log 2>&1
- user: root
- minute: '2'
- hour: '*/1'
- minute: '5'
- hour: '1'
- daymonth: '*'
- month: '*'
- dayweek: '*'

View File

@@ -129,6 +129,9 @@ so-elastalert:
- file: elastaconf
- watch:
- file: elastaconf
- onlyif:
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
append_so-elastalert_so-status.conf:
file.append:

View File

@@ -53,9 +53,6 @@ elasticsearch:
script:
max_compilations_rate: 20000/1m
indices:
query:
bool:
max_clause_count: 3500
id_field_data:
enabled: false
logger:

View File

@@ -51,9 +51,10 @@
},
{ "set": { "field": "_index", "value": "so-firewall", "override": true } },
{ "set": { "if": "ctx.network?.transport_id == '0'", "field": "network.transport", "value": "icmp", "override": true } },
{"community_id": {} },
{ "set": { "field": "module", "value": "pfsense", "override": true } },
{ "set": { "field": "dataset", "value": "firewall", "override": true } },
{ "community_id": {} },
{ "set": { "field": "module", "value": "pfsense", "override": true } },
{ "set": { "field": "dataset", "value": "firewall", "override": true } },
{ "set": { "field": "category", "value": "network", "override": true } },
{ "remove": { "field": ["real_message", "ip_sub_msg", "firewall.sub_message"], "ignore_failure": true } }
]
}

View File

@@ -0,0 +1,13 @@
{
"description" : "kratos",
"processors" : [
{
"set": {
"field": "_index",
"value": "so-kratos",
"override": true
}
},
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query -k --output /dev/null --silent --head --fail
so-elasticsearch-query / -k --output /dev/null --silent --head --fail
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"

View File

@@ -64,6 +64,9 @@ logging.files:
# automatically rotated
rotateeverybytes: 10485760 # = 10MB
# Rotate on startup
rotateonstartup: false
# Number of rotated log files to keep. Oldest files will be deleted first.
keepfiles: 7
@@ -114,7 +117,8 @@ filebeat.inputs:
fields_under_root: true
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: log
- type: filestream
id: logscan
paths:
- /logs/logscan/alerts.log
fields:
@@ -131,7 +135,8 @@ filebeat.inputs:
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
{%- if ZEEKVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
- type: log
- type: filestream
id: zeek-{{ LOGNAME }}
paths:
- /nsm/zeek/logs/current/{{ LOGNAME }}.log
fields:
@@ -146,7 +151,8 @@ filebeat.inputs:
clean_removed: true
close_removed: false
- type: log
- type: filestream
id: import-zeek={{ LOGNAME }}
paths:
- /nsm/import/*/zeek/logs/{{ LOGNAME }}.log
fields:
@@ -170,7 +176,8 @@ filebeat.inputs:
{%- endfor %}
{%- endif %}
- type: log
- type: filestream
id: suricata-eve
paths:
- /nsm/suricata/eve*.json
fields:
@@ -186,7 +193,8 @@ filebeat.inputs:
clean_removed: false
close_removed: false
- type: log
- type: filestream
id: import-suricata
paths:
- /nsm/import/*/suricata/eve*.json
fields:
@@ -208,7 +216,8 @@ filebeat.inputs:
clean_removed: false
close_removed: false
{%- if STRELKAENABLED == 1 %}
- type: log
- type: filestream
id: strelka
paths:
- /nsm/strelka/log/strelka.log
fields:
@@ -229,7 +238,8 @@ filebeat.inputs:
{%- if WAZUHENABLED == 1 %}
- type: log
- type: filestream
id: wazuh
paths:
- /wazuh/archives/archives.json
fields:
@@ -247,7 +257,8 @@ filebeat.inputs:
{%- if FLEETMANAGER or FLEETNODE %}
- type: log
- type: filestream
id: osquery
paths:
- /nsm/osquery/fleet/result.log
fields:
@@ -317,13 +328,13 @@ filebeat.inputs:
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: log
- type: filestream
id: kratos
paths:
- /logs/kratos/kratos.log
fields:
module: kratos
category: host
tags: beat-ext
processors:
- decode_json_fields:
fields: ["message"]
@@ -341,13 +352,15 @@ filebeat.inputs:
target: ''
fields:
event.dataset: access
pipeline: "kratos"
fields_under_root: true
clean_removed: false
close_removed: false
{%- endif %}
{%- if grains.role == 'so-idh' %}
- type: log
- type: filestream
id: idh
paths:
- /nsm/idh/opencanary.log
fields:
@@ -436,6 +449,12 @@ output.elasticsearch:
- index: "so-logscan"
when.contains:
module: "logscan"
- index: "so-elasticsearch-%{+YYYY.MM.dd}"
when.contains:
event.module: "elasticsearch"
- index: "so-kibana-%{+YYYY.MM.dd}"
when.contains:
event.module: "kibana"
setup.template.enabled: false
{%- else %}

View File

@@ -1,18 +1,2 @@
# DO NOT EDIT THIS FILE
{%- if MODULES.modules is iterable and MODULES.modules is not string and MODULES.modules|length > 0%}
{%- for module in MODULES.modules.keys() %}
- module: {{ module }}
{%- for fileset in MODULES.modules[module] %}
{{ fileset }}:
enabled: {{ MODULES.modules[module][fileset].enabled|string|lower }}
{#- only manage the settings if the fileset is enabled #}
{%- if MODULES.modules[module][fileset].enabled %}
{%- for var, value in MODULES.modules[module][fileset].items() %}
{%- if var|lower != 'enabled' %}
{{ var }}: {{ value }}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{%- endfor %}
{% endif %}
{{ MODULES|yaml(False) }}

View File

@@ -18,8 +18,8 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set LOCALHOSTNAME = salt['grains.get']('host') %}
{% set MANAGER = salt['grains.get']('master') %}
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
{% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %}
{% from 'filebeat/map.jinja' import FILEBEAT_EXTRA_HOSTS with context %}
{% set ES_INCLUDED_NODES = ['so-eval', 'so-standalone', 'so-managersearch', 'so-node', 'so-heavynode', 'so-import'] %}
@@ -88,21 +88,21 @@ filebeatmoduleconf:
- template: jinja
- show_changes: False
sodefaults_module_conf:
merged_module_conf:
file.managed:
- name: /opt/so/conf/filebeat/modules/securityonion.yml
- name: /opt/so/conf/filebeat/modules/modules.yml
- source: salt://filebeat/etc/module_config.yml.jinja
- template: jinja
- defaults:
MODULES: {{ SO }}
MODULES: {{ MODULESENABLED }}
thirdparty_module_conf:
file.managed:
so_module_conf_remove:
file.absent:
- name: /opt/so/conf/filebeat/modules/securityonion.yml
thirdyparty_module_conf_remove:
file.absent:
- name: /opt/so/conf/filebeat/modules/thirdparty.yml
- source: salt://filebeat/etc/module_config.yml.jinja
- template: jinja
- defaults:
MODULES: {{ THIRDPARTY }}
so-filebeat:
docker_container.running:
@@ -127,11 +127,11 @@ so-filebeat:
- 0.0.0.0:514:514/udp
- 0.0.0.0:514:514/tcp
- 0.0.0.0:5066:5066/tcp
{% for module in THIRDPARTY.modules.keys() %}
{% for submodule in THIRDPARTY.modules[module] %}
{% if THIRDPARTY.modules[module][submodule].enabled and THIRDPARTY.modules[module][submodule]["var.syslog_port"] is defined %}
- {{ THIRDPARTY.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ THIRDPARTY.modules[module][submodule]["var.syslog_port"] }}:{{ THIRDPARTY.modules[module][submodule]["var.syslog_port"] }}/tcp
- {{ THIRDPARTY.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ THIRDPARTY.modules[module][submodule]["var.syslog_port"] }}:{{ THIRDPARTY.modules[module][submodule]["var.syslog_port"] }}/udp
{% for module in MODULESMERGED.modules.keys() %}
{% for submodule in MODULESMERGED.modules[module] %}
{% if MODULESMERGED.modules[module][submodule].enabled and MODULESMERGED.modules[module][submodule]["var.syslog_port"] is defined %}
- {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/tcp
- {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/udp
{% endif %}
{% endfor %}
{% endfor %}

View File

@@ -1,10 +1,3 @@
{% import_yaml 'filebeat/thirdpartydefaults.yaml' as TPDEFAULTS %}
{% set THIRDPARTY = salt['pillar.get']('filebeat:third_party_filebeat', default=TPDEFAULTS.third_party_filebeat, merge=True) %}
{% import_yaml 'filebeat/securityoniondefaults.yaml' as SODEFAULTS %}
{% set SO = SODEFAULTS.securityonion_filebeat %}
{#% set SO = salt['pillar.get']('filebeat:third_party_filebeat', default=SODEFAULTS.third_party_filebeat, merge=True) %#}
{% set role = grains.role %}
{% set FILEBEAT_EXTRA_HOSTS = [] %}
{% set mainint = salt['pillar.get']('host:mainint') %}

View File

@@ -0,0 +1,18 @@
{% import_yaml 'filebeat/thirdpartydefaults.yaml' as TPDEFAULTS %}
{% import_yaml 'filebeat/securityoniondefaults.yaml' as SODEFAULTS %}
{% set THIRDPARTY = salt['pillar.get']('filebeat:third_party_filebeat', default=TPDEFAULTS.third_party_filebeat, merge=True) %}
{% set SO = salt['pillar.get']('filebeat:securityonion_filebeat', default=SODEFAULTS.securityonion_filebeat, merge=True) %}
{% set MODULESMERGED = salt['defaults.merge'](SO, THIRDPARTY, in_place=False) %}
{% set MODULESENABLED = [] %}
{% for module in MODULESMERGED.modules.keys() %}
{% set ENABLEDFILESETS = {} %}
{% for fileset in MODULESMERGED.modules[module] %}
{% if MODULESMERGED.modules[module][fileset].get('enabled', False) %}
{% do ENABLEDFILESETS.update({'module': module, fileset: MODULESMERGED.modules[module][fileset]}) %}
{% endif %}
{% endfor %}
{% if ENABLEDFILESETS|length > 0 %}
{% do MODULESENABLED.append(ENABLEDFILESETS) %}
{% endif %}
{% endfor %}
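For illustration, a module with a single enabled fileset would land in `MODULESENABLED` as an entry like the following (module name and settings hypothetical), which `module_config.yml.jinja` then dumps straight to YAML:
```yaml
- module: cisco
  asa:
    enabled: true
    var.syslog_port: 9001
```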

View File

@@ -1,7 +1,7 @@
filebeat:
config:
inputs:
- type: log
- type: filestream
paths:
- /nsm/mylogdir/mylog.log
fields:
@@ -19,4 +19,4 @@ filebeat:
output:
file:
path: "/tmp/filebeat"
filename: filebeat
filename: filebeat

View File

@@ -74,12 +74,6 @@ third_party_filebeat:
enabled: false
amp:
enabled: false
cyberark:
corepas:
enabled: false
var.input: udp
var.syslog_host: 0.0.0.0
var.syslog_port: 9527
cylance:
protect:
enabled: false
@@ -259,8 +253,6 @@ third_party_filebeat:
enabled: false
anomalithreatstream:
enabled: false
recordedfuture:
enabled: false
zscaler:
zia:
enabled: false

View File

@@ -59,7 +59,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/7.17.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/8.3.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done

View File

@@ -2,7 +2,7 @@
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{% if salt['pillar.get']('elasticsearch:auth:enabled', False) %}
{% do KIBANACONFIG.kibana.config.elasticsearch.update({'username': salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user'), 'password': salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass')}) %}
{% do KIBANACONFIG.kibana.config.elasticsearch.update({'username': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user'), 'password': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass')}) %}
{% else %}
{% do KIBANACONFIG.kibana.config.xpack.update({'security': {'authc': {'providers': {'anonymous': {'anonymous1': {'order': 0, 'credentials': 'elasticsearch_anonymous_user'}}}}}}) %}
{% endif %}

View File

@@ -28,7 +28,8 @@ kibana:
security:
showInsecureClusterWarning: False
xpack:
ml:
enabled: False
security:
secureCookies: True
secureCookies: true
reporting:
kibanaServer:
hostname: localhost

View File

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "7.17.4","id": "7.17.4","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.3.2","id": "8.3.2","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

View File

@@ -0,0 +1,22 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
output {
if [module] =~ "kratos" and "import" not in [tags] {
elasticsearch {
pipeline => "kratos"
hosts => "{{ ES }}"
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
{% endif %}
index => "so-kratos"
ssl => true
ssl_certificate_verification => false
}
}
}

View File

@@ -42,7 +42,7 @@ gpgkey=file:///etc/pki/rpm-gpg/docker.pub
[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack/
baseurl=https://repo.securityonion.net/file/securityonion-repo/salt/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/SALTSTACK-GPG-KEY.pub

View File

@@ -42,7 +42,7 @@ gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/docker.pub
[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack/
baseurl=http://repocache.securityonion.net/file/securityonion-repo/salt/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub

View File

@@ -7,7 +7,7 @@ saltstack.list:
file.managed:
- name: /etc/apt/sources.list.d/saltstack.list
- contents:
- deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt/ {{grains.oscodename}} main
- deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt3004.2/ {{grains.oscodename}} main
apt_update:
cmd.run:

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
version: 3004.1
version: 3004.2

View File

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
version: 3004.1
version: 3004.2
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

View File

@@ -4216,17 +4216,35 @@ install_centos_stable_deps() {
install_centos_stable() {
__PACKAGES=""
local cloud='salt-cloud'
local master='salt-master'
local minion='salt-minion'
local syndic='salt-syndic'
if echo "$STABLE_REV" | grep -q "archive";then # point release being applied
local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/
elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version (3003, 3004, etc) being applied
local ver=$STABLE_REV
fi
if [ ! -z $ver ]; then
cloud+="-$ver"
master+="-$ver"
minion+="-$ver"
syndic+="-$ver"
fi
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then
__PACKAGES="${__PACKAGES} salt-cloud"
__PACKAGES="${__PACKAGES} $cloud"
fi
if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then
__PACKAGES="${__PACKAGES} salt-master"
__PACKAGES="${__PACKAGES} $master"
fi
if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then
__PACKAGES="${__PACKAGES} salt-minion"
__PACKAGES="${__PACKAGES} $minion"
fi
if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then
__PACKAGES="${__PACKAGES} salt-syndic"
__PACKAGES="${__PACKAGES} $syndic"
fi
# shellcheck disable=SC2086

View File

@@ -15,8 +15,9 @@ function ci() {
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
pytest "$TARGET_DIR" "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
}
function download() {

View File

@@ -1 +1 @@
file_path: []
file_path: "{{ salt['pillar.get']('sensoroni:analyzers:localfile:file_path', '') }}"

View File

@@ -17,13 +17,16 @@ class TestLocalfileMethods(unittest.TestCase):
def test_main_success(self):
output = {"foo": "bar"}
conf = {"file_path": ["somefile.csv"]}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('localfile.localfile.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
localfile.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
with patch('helpers.loadConfig', new=MagicMock(return_value=conf)) as lcmock:
sys.argv = ["cmd", "input"]
localfile.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
lcmock.assert_called_once()
def test_checkConfigRequirements_present(self):
conf = {"file_path": "['intel.csv']"}

View File

@@ -35,7 +35,9 @@ class TestMalwareHashRegistryMethods(unittest.TestCase):
response = malwarehashregistry.sendReq(hash)
mock.assert_called_once_with(options, hash, flags)
self.assertIsNotNone(response)
self.assertEqual(response, {"hash": "84af04b8e69682782607a0c5796ca56999eda6b3", "last_seen": "2019-15-07 03:30:33", "av_detection_percentage": 35})
self.assertEqual(response["hash"], "84af04b8e69682782607a0c5796ca56999eda6b3")
self.assertRegex(response["last_seen"], r'2019-..-07 ..:..:33') # host running this test won't always use UTC
self.assertEqual(response["av_detection_percentage"], 35)
def test_sendReqNoData(self):
output = "84af04b8e69682782607a0c5796ca5696b3 NO_DATA"

View File

@@ -1,5 +1,5 @@
[
{ "name": "Overview", "description": "Overview of all events", "query": "* | groupby -sankey event.dataset event.category* | groupby event.dataset | groupby -bar event.module | groupby event.module | groupby -pie event.category | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "Overview", "description": "Overview of all events", "query": "* | groupby -sankey event.dataset event.category* | groupby -pie event.category | groupby -bar event.module | groupby event.dataset | groupby event.module | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "SOC Auth", "description": "Show all SOC authentication logs", "query": "event.module:kratos AND event.dataset:audit AND msg:authenticated | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent"},
{ "name": "Elastalerts", "description": "Elastalert logs", "query": "_index: \"*:elastalert*\" | groupby rule_name | groupby alert_info.type"},
{ "name": "Alerts", "description": "Show all alerts", "query": "event.dataset: alert | groupby event.module | groupby rule.name | groupby event.severity | groupby source.ip | groupby destination.ip | groupby destination.port"},
@@ -16,7 +16,7 @@
{ "name": "DPD", "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol"},
{ "name": "Files", "description": "Files seen in network traffic", "query": "event.dataset:file | groupby file.mime_type | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip"},
{ "name": "FTP", "description": "File Transfer Protocol logs", "query": "event.dataset:ftp | groupby ftp.command | groupby ftp.argument | groupby ftp.user | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "HTTP", "description": "Hyper Text Transport Protocol logs", "query": "event.dataset:http | groupby http.method | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "HTTP", "description": "Hyper Text Transport Protocol logs", "query": "event.dataset:http | groupby http.method | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "Intel", "description": "Zeek Intel framework hits", "query": "event.dataset:intel | groupby intel.indicator | groupby intel.indicator_type | groupby intel.seen_where | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "IRC", "description": "Internet Relay Chat logs", "query": "event.dataset:irc | groupby irc.command.type | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port"},
{ "name": "Kerberos", "description": "Kerberos logs", "query": "event.dataset:kerberos | groupby kerberos.service | groupby kerberos.client | groupby kerberos.request_type | groupby source.ip | groupby destination.ip | groupby destination.port"},

View File

@@ -218,7 +218,7 @@ suricata:
enabled: "yes"
# memcap: 64mb
rdp:
#enabled: "no"
enabled: "yes"
ssh:
enabled: "yes"
smtp:
@@ -331,7 +331,16 @@ suricata:
dhcp:
enabled: "yes"
sip:
#enabled: "no"
enabled: "yes"
rfb:
enabled: "yes"
detection-ports:
dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
mqtt:
enabled: "no"
http2:
enabled: "no"
asn1-max-frames: 256
run-as:
user: suricata

View File

@@ -30,13 +30,13 @@ fi
echo "Applying cross cluster search config..."
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
-d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
# Add all the search nodes to cross cluster searching.
{%- if TRUECLUSTER is sameas false %}
{%- if salt['pillar.get']('nodestab', {}) %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"cluster": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{%- endfor %}
{%- endif %}
{%- endif %}

View File

@@ -28,4 +28,4 @@ fi
echo "Applying cross cluster search config..."
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
-d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"

View File

@@ -145,7 +145,7 @@ analyst_salt_local() {
securityonion_repo
gpg_rpm_import
# Install salt
logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
logCmd "yum -y install salt-minion-3004.2 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
logCmd "yum -y update --exclude=salt*"
salt-call state.apply workstation --local --file-root=../salt/ -l info 2>&1 | tee -a outfile
@@ -2277,7 +2277,7 @@ saltify() {
fi
set_progress_str 7 'Installing salt-master'
if [[ ! $is_iso ]]; then
logCmd "yum -y install salt-master-3004.1"
logCmd "yum -y install salt-master-3004.2"
fi
logCmd "systemctl enable salt-master"
;;
@@ -2290,7 +2290,7 @@ saltify() {
fi
set_progress_str 8 'Installing salt-minion & python modules'
if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
logCmd "yum -y install salt-minion-3004.2 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
logCmd "yum -y update --exclude=salt*"
fi
logCmd "systemctl enable salt-minion"
@@ -2330,7 +2330,7 @@ saltify() {
# Add saltstack repo(s)
wget -q --inet4-only -O - https://repo.securityonion.net/file/securityonion-repo/ubuntu/"$ubuntu_version"/amd64/salt/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2351,7 +2351,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
retry 50 10 "apt-get -y install salt-master=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install salt-master=3004.2+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
;;
*)
@@ -2362,14 +2362,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules'
retry 50 10 "apt-get -y install salt-minion=3004.1+ds-1 salt-common=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install salt-minion=3004.2+ds-1 salt-common=3004.2+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1
fi

View File

@@ -1106,9 +1106,9 @@ if [[ $success != 0 ]]; then SO_ERROR=1; fi
# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox
# Ignore "Status .* was not found" due to output from salt http.query or http.wait_for_successful_query states used with retry
# Uncaught exception, closing connection|Exception in callback None - this is seen during influxdb / http.wait_for_successful_query state for ubuntu reinstall
if grep -E "ERROR|Result: False" $setup_log | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100" || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
if grep -E "ERROR|Result: False" $setup_log | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100|Running scope as unit" || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
SO_ERROR=1
grep --color=never "ERROR" "$setup_log" | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100" > "$error_log"
grep --color=never "ERROR" "$setup_log" | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100|Running scope as unit" > "$error_log"
fi
if [[ -n $SO_ERROR ]]; then

Binary file not shown.