mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
Merge pull request #8432 from Security-Onion-Solutions/dev
Merge dev into foxtrot
This commit is contained in:
24
.github/workflows/contrib.yml
vendored
Normal file
@@ -0,0 +1,24 @@
name: contrib

on:
  issue_comment:
    types: [created]
  pull_request_target:
    types: [opened,closed,synchronize]

jobs:
  CLAssistant:
    runs-on: ubuntu-latest
    steps:
      - name: "Contributor Check"
        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
        uses: cla-assistant/github-action@v2.1.3-beta
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PERSONAL_ACCESS_TOKEN : ${{ secrets.PERSONAL_ACCESS_TOKEN }}
        with:
          path-to-signatures: 'signatures_v1.json'
          path-to-document: 'https://securityonionsolutions.com/cla'
          allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,weslambert,defensivedepth,m0duspwnens
          remote-organization-name: Security-Onion-Solutions
          remote-repository-name: licensing
2
.github/workflows/leaktest.yml
vendored
@@ -12,6 +12,6 @@ jobs:
          fetch-depth: '0'

      - name: Gitleaks
        uses: zricethezav/gitleaks-action@master
        uses: gitleaks/gitleaks-action@v1.6.0
        with:
          config-path: .github/.gitleaks.toml
@@ -1,6 +1,6 @@
## Security Onion 2.3.130
## Security Onion 2.3.150

Security Onion 2.3.130 is here!
Security Onion 2.3.150 is here!

## Screenshots
@@ -1,18 +1,18 @@
### 2.3.130-20220607 ISO image built on 2022/06/07
### 2.3.140-20220719 ISO image built on 2022/07/19

### Download and Verify

2.3.130-20220607 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso
2.3.140-20220719 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.140-20220719.iso

MD5: 0034D6A9461C04357AFF512875408A4C
SHA1: BF80EEB101C583153CAD8E185A7DB3173FD5FFE8
SHA256: 15943623B96D8BB4A204A78668447F36B54A63ABA5F8467FBDF0B25C5E4E6078
MD5: 68768DF9861B93BB8CC9637C80239803
SHA1: F15421C045227B334C7044E5F7F309A2BC7AEB19
SHA256: 4736E3E80E28EFBAB1923C121A3F78DBDBCBBBF65D715924A88B2E96EB3C6093
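The first set of checksums belongs to the old 2.3.130-20220607 image and the second set to the new 2.3.140-20220719 image. As a quick local check alongside the GPG verification below (not part of the original instructions), the downloaded image's digest can be compared against the published value:
```
sha256sum securityonion-2.3.140-20220719.iso
# expected: 4736e3e80e28efbab1923c121a3f78dbdbcbbbf65d715924a88b2e96eb3c6093
```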
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.130-20220607.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.140-20220719.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma

Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.130-20220607.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.140-20220719.iso.sig
```

Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.140-20220719.iso
```

Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.130-20220607.iso.sig securityonion-2.3.130-20220607.iso
gpg --verify securityonion-2.3.140-20220719.iso.sig securityonion-2.3.140-20220719.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 07 Jun 2022 01:27:20 PM EDT using RSA key ID FE507013
gpg: Signature made Tue 19 Jul 2022 02:00:29 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
@@ -14,4 +14,5 @@ logstash:
  - so/9700_output_strelka.conf.jinja
  - so/9800_output_logscan.conf.jinja
  - so/9801_output_rita.conf.jinja
  - so/9802_output_kratos.conf.jinja
  - so/9900_output_endgame.conf.jinja
@@ -29,7 +29,7 @@ fi

interface="$1"
shift
sudo tcpdump -i $interface -ddd $@ | tail -n+2 |
tcpdump -i $interface -ddd $@ | tail -n+2 |
while read line; do
  cols=( $line )
  printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
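For context (not from the commit itself): `tcpdump -ddd` prints the compiled BPF program as decimal numbers, an instruction count on the first line followed by one `code jt jf k` quadruple per instruction, which is why the script drops the first line with `tail -n+2` and packs each quadruple into hex. Illustrative output only:
```
tcpdump -i eth0 -ddd 'tcp' | head -2
# 8              <- instruction count, skipped by tail -n+2
# 40 0 0 12      <- ldh [12]; printf renders this quadruple as 002800000000000c
```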
@@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import re
import subprocess
import sys
import time
@@ -26,6 +27,7 @@ hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yam
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
defaultPortgroupsFilename = "/opt/so/saltstack/default/salt/firewall/portgroups.yaml"
supportedProtocols = ['tcp', 'udp']
readonly = False

def showUsage(options, args):
    print('Usage: {} [OPTIONS] <COMMAND> [ARGS...]'.format(sys.argv[0]))
@@ -70,10 +72,26 @@ def checkApplyOption(options):
    return apply(None, None)

def loadYaml(filename):
    global readonly

    file = open(filename, "r")
    return yaml.safe_load(file.read())
    content = file.read()

    # Remove Jinja templating (for read-only operations)
    if "{%" in content or "{{" in content:
        content = content.replace("{{ ssh_port }}", "22")
        pattern = r'.*({%|{{|}}|%}).*'
        content = re.sub(pattern, "", content)
        readonly = True

    return yaml.safe_load(content)

def writeYaml(filename, content):
    global readonly

    if readonly:
        raise Exception("Cannot write yaml file that has been flagged as read-only")

    file = open(filename, "w")
    return yaml.dump(content, file)
@@ -1,6 +1,7 @@
#!/bin/bash
. /usr/sbin/so-common
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"
wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "{{ ELASTICCURL }}"
## This hackery will be removed if using Elastic Auth ##

# Let's snag a cookie from Kibana
@@ -12,6 +13,6 @@ echo "Setting up default Space:"
{% if HIGHLANDER %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
{% else %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet","fleetv2","securitySolutionCases"]} ' >> /opt/so/log/kibana/misc.log
{% endif %}
echo
@@ -437,7 +437,7 @@ function updateStatus() {
    state="inactive"
  fi
  body="{ \"schema_id\": \"$schemaId\", \"state\": \"$state\", \"traits\": $traitBlock }"
  response=$(curl -fSsL -XPUT "${kratosUrl}/identities/$identityId" -d "$body")
  response=$(curl -fSsL -XPUT -H "Content-Type: application/json" "${kratosUrl}/identities/$identityId" -d "$body")
  [[ $? != 0 ]] && fail "Unable to update user"
}
@@ -48,7 +48,7 @@ fi
|
||||
|
||||
{% else %}
|
||||
|
||||
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
|
||||
gh_status=$(curl -s -o /dev/null -w "%{http_code}" https://github.com)
|
||||
clone_dir="/tmp"
|
||||
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
|
||||
|
||||
|
||||
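A side note on the reachability check (not part of the change itself): plain `http://github.com` normally answers with a 301 redirect to HTTPS, while `https://github.com` answers 200, which is why the test accepts either status code.
```
curl -s -o /dev/null -w "%{http_code}\n" http://github.com    # typically 301
curl -s -o /dev/null -w "%{http_code}\n" https://github.com   # typically 200
```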
@@ -371,12 +371,109 @@ clone_to_tmp() {
  fi
}

elastalert_indices_check() {

  # Stop Elastalert to prevent Elastalert indices from being re-created
  if grep -q "^so-elastalert$" /opt/so/conf/so-status/so-status.conf ; then
    so-elastalert-stop || true
  fi

  # Wait for ElasticSearch to initialize
  echo -n "Waiting for ElasticSearch..."
  COUNT=0
  ELASTICSEARCH_CONNECTED="no"
  while [[ "$COUNT" -le 240 ]]; do
    so-elasticsearch-query / -k --output /dev/null
    if [ $? -eq 0 ]; then
      ELASTICSEARCH_CONNECTED="yes"
      echo "connected!"
      break
    else
      ((COUNT+=1))
      sleep 1
      echo -n "."
    fi
  done

  # Unable to connect to Elasticsearch
  if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
    echo
    echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
    echo
    exit 1
  fi

  # Check Elastalert indices
  echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
  CHECK_COUNT=0
  while [[ "$CHECK_COUNT" -le 2 ]]; do
    # Delete Elastalert indices
    for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do
      so-elasticsearch-query $i -XDELETE;
    done

    # Check to ensure Elastalert indices are deleted
    COUNT=0
    ELASTALERT_INDICES_DELETED="no"
    while [[ "$COUNT" -le 240 ]]; do
      RESPONSE=$(so-elasticsearch-query elastalert*)
      if [[ "$RESPONSE" == "{}" ]]; then
        ELASTALERT_INDICES_DELETED="yes"
        echo "Elastalert indices successfully deleted."
        break
      else
        ((COUNT+=1))
        sleep 1
        echo -n "."
      fi
    done
    ((CHECK_COUNT+=1))
  done

  # If we were unable to delete the Elastalert indices, exit the script
  if [ "$ELASTALERT_INDICES_DELETED" == "no" ]; then
    echo
    echo -e "Unable to connect to delete Elastalert indices. Exiting."
    echo
    exit 1
  fi
}
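so-elasticsearch-query is Security Onion's wrapper around curl pointed at the local Elasticsearch node (handling its connection settings and, where enabled, authentication), so the cleanup above reduces to standard Elasticsearch APIs. Shown here against a hypothetical direct localhost:9200 endpoint purely for illustration; on a real node use so-elasticsearch-query as the function does:
```
curl -ks https://localhost:9200/_cat/indices | grep elastalert   # list leftover elastalert indices
curl -ks -XDELETE https://localhost:9200/elastalert_status       # delete one by name (name is an example)
curl -ks "https://localhost:9200/elastalert*"                    # returns {} once nothing matches, which is what the loop checks for
```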
enable_highstate() {
  echo "Enabling highstate."
  salt-call state.enable highstate -l info --local
  echo ""
}

es_version_check() {
  CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}')

  if [ "$CHECK_ES" -lt "110" ]; then
    echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher."
    echo ""
    echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:"
    echo "sudo BRANCH=2.3.130-20220607 soup"
    echo ""
    echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso."
    echo ""
    echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***"
    exit 1
  fi
}
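Tracing the check as written: the third dotted field of the installed version is compared numerically against 110 (the guidance text then steers users through 2.3.130). For example:
```
INSTALLEDVERSION=2.3.100
echo $INSTALLEDVERSION | awk -F. '{print $3}'   # prints 100, which is < 110, so soup prints the messages above and exits
# 2.3.120 yields 120 and passes the numeric gate.
```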
es_indices_check() {
  echo "Checking for unsupported Elasticsearch indices..."
  UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human |grep '"created_string":"6' | jq -r 'keys'[0]; done)
  if [ -z "$UNSUPPORTED_INDICES" ]; then
    echo "No unsupported indices found."
  else
    echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see https://docs.securityonion.net/en/2.3/soup.html#elastic-8 for more details."
    echo
    echo "$UNSUPPORTED_INDICES"
    exit 1
  fi
}

generate_and_clean_tarballs() {
  local new_version
  new_version=$(cat $UPDATE_DIR/VERSION)
@@ -440,6 +537,7 @@ postupgrade_changes() {
  [[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
  [[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
  [[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130
  [[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140

  true
@@ -516,6 +614,14 @@ post_to_2.3.130() {
  POSTVERSION=2.3.130
}

post_to_2.3.140() {
  echo "Post Processing for 2.3.140"
  FORCE_SYNC=true so-user sync
  so-kibana-restart
  so-kibana-space-defaults
  POSTVERSION=2.3.140
}

stop_salt_master() {
@@ -763,10 +869,13 @@ up_to_2.3.100() {

  echo "Adding receiver to assigned_hostgroups.local.map.yaml"
  grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml

  INSTALLEDVERSION=2.3.100
}

up_to_2.3.110() {
  sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
  INSTALLEDVERSION=2.3.110
}

up_to_2.3.120() {
@@ -774,42 +883,19 @@ up_to_2.3.120() {
  so-thehive-stop
  so-thehive-es-stop
  so-cortex-stop
  INSTALLEDVERSION=2.3.120
}

up_to_2.3.130() {
  # Remove file for nav update
  rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
  INSTALLEDVERSION=2.3.130
}

up_to_2.3.140() {
  ## Deleting Elastalert indices to prevent issues with upgrade to Elastic 8 ##
  echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
  # Wait for ElasticSearch to initialize
  echo -n "Waiting for ElasticSearch..."
  COUNT=0
  ELASTICSEARCH_CONNECTED="no"
  while [[ "$COUNT" -le 240 ]]; do
    so-elasticsearch-query -k --output /dev/null
    if [ $? -eq 0 ]; then
      ELASTICSEARCH_CONNECTED="yes"
      echo "connected!"
      break
    else
      ((COUNT+=1))
      sleep 1
      echo -n "."
    fi
  done
  if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
    echo
    echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
    echo
    exit 1
  fi

  # Delete Elastalert indices
  for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do so-elasticsearch-query $i -XDELETE; done
  elastalert_indices_check
  ##
  INSTALLEDVERSION=2.3.140
}

verify_upgradespace() {
@@ -990,7 +1076,7 @@ update_repo() {
  fi

  rm -f /etc/apt/sources.list.d/salt.list
  echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt $OSVER main" > /etc/apt/sources.list.d/saltstack.list
  echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list
  apt-get update
  fi
}
@@ -1125,6 +1211,9 @@ main() {
  fi
  echo "Verifying we have the latest soup script."
  verify_latest_update_script
  es_version_check
  es_indices_check
  elastalert_indices_check
  echo ""
  set_palette
  check_elastic_license
29
salt/curator/files/action/so-kratos-close.yml
Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-kratos:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close kratos indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-kratos.*|so-kratos.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29
salt/curator/files/action/so-kratos-delete.yml
Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kratos:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete kratos indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-kratos.*|so-kratos.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24
salt/curator/files/action/so-kratos-warm.yml
Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kratos:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-kratos
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
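The three new kratos curator actions (close, delete, warm) all read their thresholds from the `elasticsearch:index_settings:so-kratos` pillar, defaulting to 30, 365 and 7 days respectively. A quick way to see what a given node will actually render, just a standard Salt query rather than anything added by this commit:
```
sudo salt-call pillar.get elasticsearch:index_settings:so-kratos:close
# empty output means the 30-day default in the template applies
```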
@@ -32,6 +32,8 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kibana-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;

@@ -31,6 +31,7 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;

@@ -31,6 +31,7 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-delete.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-delete.yml > /dev/null 2>&1;

@@ -31,6 +31,7 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-warm.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-warm.yml > /dev/null 2>&1;
@@ -201,8 +201,8 @@ so-curatorclusterclose:
  cron.present:
    - name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
    - user: root
    - minute: '2'
    - hour: '*/1'
    - minute: '5'
    - hour: '1'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
@@ -211,8 +211,8 @@ so-curatorclusterdelete:
  cron.present:
    - name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-delete.log 2>&1
    - user: root
    - minute: '2'
    - hour: '*/1'
    - minute: '5'
    - hour: '1'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
@@ -221,8 +221,8 @@ so-curatorclusterwarm:
  cron.present:
    - name: /usr/sbin/so-curator-cluster-warm > /opt/so/log/curator/cron-warm.log 2>&1
    - user: root
    - minute: '2'
    - hour: '*/1'
    - minute: '5'
    - hour: '1'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
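In each of the three states the old schedule (minute '2', hour '*/1') is replaced by the new one (minute '5', hour '1'), so the curator jobs move from minute 2 of every hour to once a day at 01:05. In root's crontab that works out to roughly the following before/after (a sketch; cron.present also writes its own SALT_CRON identifier comment):
```
# before: 2 * * * * /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
# after:  5 1 * * * /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
```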
@@ -129,6 +129,9 @@ so-elastalert:
      - file: elastaconf
    - watch:
      - file: elastaconf
    - onlyif:
      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}

append_so-elastalert_so-status.conf:
  file.append:
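The new onlyif gate keys off the major version reported by the Elasticsearch root endpoint, so the so-elastalert container is only started once the node is actually running Elastic 8. For reference (not part of the change), what the pipeline evaluates to on an upgraded node:
```
so-elasticsearch-query / | jq -r '.version.number[0:1]'   # prints 8 on Elastic 8.x, so grep -q 8 succeeds
```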
@@ -51,9 +51,10 @@
    },
    { "set": { "field": "_index", "value": "so-firewall", "override": true } },
    { "set": { "if": "ctx.network?.transport_id == '0'", "field": "network.transport", "value": "icmp", "override": true } },
    {"community_id": {} },
    { "community_id": {} },
    { "set": { "field": "module", "value": "pfsense", "override": true } },
    { "set": { "field": "dataset", "value": "firewall", "override": true } },
    { "set": { "field": "category", "value": "network", "override": true } },
    { "remove": { "field": ["real_message", "ip_sub_msg", "firewall.sub_message"], "ignore_failure": true } }
  ]
}
13
salt/elasticsearch/files/ingest/kratos
Normal file
@@ -0,0 +1,13 @@
{
  "description" : "kratos",
  "processors" : [
    {
      "set": {
        "field": "_index",
        "value": "so-kratos",
        "override": true
      }
    },
    { "pipeline": { "name": "common" } }
  ]
}
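On a deployed grid this file is loaded into Elasticsearch as the `kratos` ingest pipeline by Security Onion's normal pipeline loading, the same mechanism that handles the other files under salt/elasticsearch/files/ingest. Purely as a sketch, the equivalent manual call would be a standard ingest-pipeline PUT; the on-disk path used below is an assumption, not something stated in this diff:
```
# Sketch only; so-elasticsearch-query passes the extra arguments straight to curl.
so-elasticsearch-query _ingest/pipeline/kratos -XPUT \
  -H 'Content-Type: application/json' \
  -d @/opt/so/conf/elasticsearch/ingest/kratos   # assumed rendered location of this file
```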
2
salt/elasticsearch/tools/sbin/so-elasticsearch-templates-load
Executable file → Normal file
@@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
  so-elasticsearch-query -k --output /dev/null --silent --head --fail
  so-elasticsearch-query / -k --output /dev/null --silent --head --fail
  if [ $? -eq 0 ]; then
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
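The fix here matches the one in soup's wait loop: so-elasticsearch-query expects the request path as its first argument and hands everything after it to curl, so the old call without a path did not query the intended endpoint. Usage pattern as inferred from how this commit calls it, not from the wrapper's own documentation:
```
so-elasticsearch-query /                     # GET the cluster root (used as a liveness probe above)
so-elasticsearch-query _cat/indices          # list indices
so-elasticsearch-query some-index -XDELETE   # hypothetical index name; extra args go straight to curl
```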
@@ -118,6 +118,7 @@ filebeat.inputs:

{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: filestream
  id: logscan
  paths:
    - /logs/logscan/alerts.log
  fields:
@@ -135,6 +136,7 @@ filebeat.inputs:
{%- if ZEEKVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
- type: filestream
  id: zeek-{{ LOGNAME }}
  paths:
    - /nsm/zeek/logs/current/{{ LOGNAME }}.log
  fields:
@@ -150,6 +152,7 @@ filebeat.inputs:
  close_removed: false

- type: filestream
  id: import-zeek={{ LOGNAME }}
  paths:
    - /nsm/import/*/zeek/logs/{{ LOGNAME }}.log
  fields:
@@ -174,6 +177,7 @@ filebeat.inputs:
{%- endif %}

- type: filestream
  id: suricata-eve
  paths:
    - /nsm/suricata/eve*.json
  fields:
@@ -190,6 +194,7 @@ filebeat.inputs:
  close_removed: false

- type: filestream
  id: import-suricata
  paths:
    - /nsm/import/*/suricata/eve*.json
  fields:
@@ -212,6 +217,7 @@ filebeat.inputs:
  close_removed: false
{%- if STRELKAENABLED == 1 %}
- type: filestream
  id: strelka
  paths:
    - /nsm/strelka/log/strelka.log
  fields:
@@ -233,6 +239,7 @@ filebeat.inputs:
{%- if WAZUHENABLED == 1 %}

- type: filestream
  id: wazuh
  paths:
    - /wazuh/archives/archives.json
  fields:
@@ -251,6 +258,7 @@ filebeat.inputs:
{%- if FLEETMANAGER or FLEETNODE %}

- type: filestream
  id: osquery
  paths:
    - /nsm/osquery/fleet/result.log
  fields:
@@ -321,12 +329,12 @@ filebeat.inputs:

{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: filestream
  id: kratos
  paths:
    - /logs/kratos/kratos.log
  fields:
    module: kratos
    category: host
  tags: beat-ext
  processors:
    - decode_json_fields:
        fields: ["message"]
@@ -344,6 +352,7 @@ filebeat.inputs:
        target: ''
  fields:
    event.dataset: access
  pipeline: "kratos"
  fields_under_root: true
  clean_removed: false
  close_removed: false
@@ -351,6 +360,7 @@ filebeat.inputs:

{%- if grains.role == 'so-idh' %}
- type: filestream
  id: idh
  paths:
    - /nsm/idh/opencanary.log
  fields:
@@ -439,6 +449,12 @@ output.elasticsearch:
    - index: "so-logscan"
      when.contains:
        module: "logscan"
    - index: "so-elasticsearch-%{+YYYY.MM.dd}"
      when.contains:
        event.module: "elasticsearch"
    - index: "so-kibana-%{+YYYY.MM.dd}"
      when.contains:
        event.module: "kibana"

setup.template.enabled: false
{%- else %}
@@ -127,7 +127,14 @@ so-filebeat:
      - 0.0.0.0:514:514/udp
      - 0.0.0.0:514:514/tcp
      - 0.0.0.0:5066:5066/tcp

      {% for module in MODULESMERGED.modules.keys() %}
      {% for submodule in MODULESMERGED.modules[module] %}
      {% if MODULESMERGED.modules[module][submodule].enabled and MODULESMERGED.modules[module][submodule]["var.syslog_port"] is defined %}
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/tcp
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/udp
      {% endif %}
      {% endfor %}
      {% endfor %}
    - watch:
      - file: filebeatconf
    - require:
@@ -137,14 +144,7 @@ so-filebeat:
      - x509: conf_filebeat_crt
      - x509: conf_filebeat_key
      - x509: trusttheca
      {% for module in MODULESMERGED.modules.keys() %}
      {% for submodule in MODULESMERGED.modules[module] %}
      {% if MODULESMERGED.modules[module][submodule].enabled and MODULESMERGED.modules[module][submodule]["var.syslog_port"] is defined %}
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/tcp
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/udp
      {% endif %}
      {% endfor %}
      {% endfor %}

{% if grains.role in ES_INCLUDED_NODES %}
run_module_setup:
  cmd.run:
@@ -1,7 +1,7 @@
filebeat:
  config:
    inputs:
      - type: log
      - type: filestream
        paths:
          - /nsm/mylogdir/mylog.log
        fields:
@@ -59,7 +59,7 @@ update() {

  IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
  for i in "${LINES[@]}"; do
    RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/8.2.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
    RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/8.3.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
    echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
  done

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.2.2","id": "8.2.2","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.3.2","id": "8.3.2","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
@@ -0,0 +1,22 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
output {
  if [module] =~ "kratos" and "import" not in [tags] {
    elasticsearch {
      pipeline => "kratos"
      hosts => "{{ ES }}"
      {% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
      {% endif %}
      index => "so-kratos"
      ssl => true
      ssl_certificate_verification => false
    }
  }
}
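Taken together with the other kratos pieces in this merge, the flow for SOC authentication logs is: Filebeat tails /logs/kratos/kratos.log with the `kratos` pipeline set, the new 9802_output_kratos Logstash output ships matching events to Elasticsearch, the `kratos` ingest pipeline forces them into the `so-kratos` index, and the new curator actions close/delete/warm that index on schedule. A simple after-the-fact check that events are landing, using the same query pattern the scripts above rely on (not part of the commit):
```
so-elasticsearch-query _cat/indices/so-kratos*
```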
@@ -42,7 +42,7 @@ gpgkey=file:///etc/pki/rpm-gpg/docker.pub

[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack/
baseurl=https://repo.securityonion.net/file/securityonion-repo/salt/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/SALTSTACK-GPG-KEY.pub

@@ -42,7 +42,7 @@ gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/docker.pub

[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack/
baseurl=http://repocache.securityonion.net/file/securityonion-repo/salt/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub

@@ -7,7 +7,7 @@ saltstack.list:
  file.managed:
    - name: /etc/apt/sources.list.d/saltstack.list
    - contents:
      - deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt/ {{grains.oscodename}} main
      - deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt3004.2/ {{grains.oscodename}} main

apt_update:
  cmd.run:
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  master:
    version: 3004.1
    version: 3004.2

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  minion:
    version: 3004.1
    version: 3004.2
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.
@@ -4216,17 +4216,35 @@ install_centos_stable_deps() {
install_centos_stable() {
  __PACKAGES=""

  local cloud='salt-cloud'
  local master='salt-master'
  local minion='salt-minion'
  local syndic='salt-syndic'

  if echo "$STABLE_REV" | grep -q "archive";then # point release being applied
    local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/
  elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie
    local ver=$STABLE_REV
  fi

  if [ ! -z $ver ]; then
    cloud+="-$ver"
    master+="-$ver"
    minion+="-$ver"
    syndic+="-$ver"
  fi

  if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then
    __PACKAGES="${__PACKAGES} salt-cloud"
    __PACKAGES="${__PACKAGES} $cloud"
  fi
  if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then
    __PACKAGES="${__PACKAGES} salt-master"
    __PACKAGES="${__PACKAGES} $master"
  fi
  if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then
    __PACKAGES="${__PACKAGES} salt-minion"
    __PACKAGES="${__PACKAGES} $minion"
  fi
  if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then
    __PACKAGES="${__PACKAGES} salt-syndic"
    __PACKAGES="${__PACKAGES} $syndic"
  fi

  # shellcheck disable=SC2086
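The patched bootstrap logic derives a versioned package name from $STABLE_REV so the installer can pin an exact Salt release. Tracing the branches as written:
```
# STABLE_REV="archive/3004.2" -> ver=3004.2 (the part after "archive/"), so __PACKAGES gets "salt-minion-3004.2"
# STABLE_REV="3004"           -> ver=3004, so __PACKAGES gets "salt-minion-3004"
# STABLE_REV="latest"         -> ver stays unset and the unversioned "salt-minion" is installed
```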
@@ -15,8 +15,9 @@ function ci() {
    exit 1
  fi

  pip install pytest pytest-cov
  flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
  pytest "$TARGET_DIR" "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100
  python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
}

function download() {

@@ -1 +1 @@
file_path: []
file_path: "{{ salt['pillar.get']('sensoroni:analyzers:localfile:file_path', '') }}"
@@ -17,13 +17,16 @@ class TestLocalfileMethods(unittest.TestCase):

    def test_main_success(self):
        output = {"foo": "bar"}
        conf = {"file_path": ["somefile.csv"]}
        with patch('sys.stdout', new=StringIO()) as mock_stdout:
            with patch('localfile.localfile.analyze', new=MagicMock(return_value=output)) as mock:
                with patch('helpers.loadConfig', new=MagicMock(return_value=conf)) as lcmock:
                    sys.argv = ["cmd", "input"]
                    localfile.main()
                    expected = '{"foo": "bar"}\n'
                    self.assertEqual(mock_stdout.getvalue(), expected)
                    mock.assert_called_once()
                    lcmock.assert_called_once()

    def test_checkConfigRequirements_present(self):
        conf = {"file_path": "['intel.csv']"}

@@ -35,7 +35,9 @@ class TestMalwareHashRegistryMethods(unittest.TestCase):
        response = malwarehashregistry.sendReq(hash)
        mock.assert_called_once_with(options, hash, flags)
        self.assertIsNotNone(response)
        self.assertEqual(response, {"hash": "84af04b8e69682782607a0c5796ca56999eda6b3", "last_seen": "2019-15-07 03:30:33", "av_detection_percentage": 35})
        self.assertEqual(response["hash"], "84af04b8e69682782607a0c5796ca56999eda6b3")
        self.assertRegex(response["last_seen"], r'2019-..-07 ..:..:33') # host running this test won't always use UTC
        self.assertEqual(response["av_detection_percentage"], 35)

    def test_sendReqNoData(self):
        output = "84af04b8e69682782607a0c5796ca5696b3 NO_DATA"
@@ -1,5 +1,5 @@
[
  { "name": "Overview", "description": "Overview of all events", "query": "* | groupby -sankey event.dataset event.category* | groupby event.dataset | groupby -bar event.module | groupby event.module | groupby -pie event.category | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "Overview", "description": "Overview of all events", "query": "* | groupby -sankey event.dataset event.category* | groupby -pie event.category | groupby -bar event.module | groupby event.dataset | groupby event.module | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "SOC Auth", "description": "Show all SOC authentication logs", "query": "event.module:kratos AND event.dataset:audit AND msg:authenticated | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent"},
  { "name": "Elastalerts", "description": "Elastalert logs", "query": "_index: \"*:elastalert*\" | groupby rule_name | groupby alert_info.type"},
  { "name": "Alerts", "description": "Show all alerts", "query": "event.dataset: alert | groupby event.module | groupby rule.name | groupby event.severity | groupby source.ip | groupby destination.ip | groupby destination.port"},
@@ -16,7 +16,7 @@
  { "name": "DPD", "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol"},
  { "name": "Files", "description": "Files seen in network traffic", "query": "event.dataset:file | groupby file.mime_type | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip"},
  { "name": "FTP", "description": "File Transfer Protocol logs", "query": "event.dataset:ftp | groupby ftp.command | groupby ftp.argument | groupby ftp.user | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "HTTP", "description": "Hyper Text Transport Protocol logs", "query": "event.dataset:http | groupby http.method | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "HTTP", "description": "Hyper Text Transport Protocol logs", "query": "event.dataset:http | groupby http.method | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "Intel", "description": "Zeek Intel framework hits", "query": "event.dataset:intel | groupby intel.indicator | groupby intel.indicator_type | groupby intel.seen_where | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "IRC", "description": "Internet Relay Chat logs", "query": "event.dataset:irc | groupby irc.command.type | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port"},
  { "name": "Kerberos", "description": "Kerberos logs", "query": "event.dataset:kerberos | groupby kerberos.service | groupby kerberos.client | groupby kerberos.request_type | groupby source.ip | groupby destination.ip | groupby destination.port"},

@@ -19,7 +19,7 @@
      "/joblookup?esid={:soc_id}&time={:@timestamp}",
      "/joblookup?ncid={:network.community_id}&time={:@timestamp}"
    ],
    "categories": ["hunt", "alerts"]},
    "categories": ["hunt", "alerts", "dashboards"]},
  { "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
    "links": [
      "/cyberchef/#input={value|base64}"
@@ -218,7 +218,7 @@ suricata:
        enabled: "yes"
        # memcap: 64mb
      rdp:
        #enabled: "no"
        enabled: "yes"
      ssh:
        enabled: "yes"
      smtp:
@@ -331,7 +331,16 @@ suricata:
      dhcp:
        enabled: "yes"
      sip:
        #enabled: "no"
        enabled: "yes"
      rfb:
        enabled: "yes"
        detection-ports:
          dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
      mqtt:
        enabled: "no"
      http2:
        enabled: "no"

    asn1-max-frames: 256
    run-as:
      user: suricata
@@ -84,7 +84,9 @@ base:
    {%- if STRELKA %}
    - strelka
    {%- endif %}
    {%- if FILEBEAT %}
    - filebeat
    {%- endif %}
    {%- if FLEETMANAGER or FLEETNODE %}
    - fleet.install_package
    {%- endif %}
@@ -433,7 +435,9 @@ base:
    - redis
    - fleet
    - fleet.install_package
    {%- if FILEBEAT %}
    - filebeat
    {%- endif %}
    - schedule
    - docker_clean

@@ -507,7 +511,9 @@ base:
    {%- endif %}
    - schedule
    - docker_clean
    {%- if FILEBEAT %}
    - filebeat
    {%- endif %}
    - idh

  'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )':
@@ -145,7 +145,7 @@ analyst_salt_local() {
  securityonion_repo
  gpg_rpm_import
  # Install salt
  logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
  logCmd "yum -y install salt-minion-3004.2 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
  logCmd "yum -y update --exclude=salt*"

  salt-call state.apply workstation --local --file-root=../salt/ -l info 2>&1 | tee -a outfile
@@ -2277,7 +2277,7 @@ saltify() {
      fi
      set_progress_str 7 'Installing salt-master'
      if [[ ! $is_iso ]]; then
        logCmd "yum -y install salt-master-3004.1"
        logCmd "yum -y install salt-master-3004.2"
      fi
      logCmd "systemctl enable salt-master"
      ;;
@@ -2290,7 +2290,7 @@ saltify() {
      fi
      set_progress_str 8 'Installing salt-minion & python modules'
      if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
        logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
        logCmd "yum -y install salt-minion-3004.2 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
        logCmd "yum -y update --exclude=salt*"
      fi
      logCmd "systemctl enable salt-minion"
@@ -2330,7 +2330,7 @@ saltify() {

      # Add saltstack repo(s)
      wget -q --inet4-only -O - https://repo.securityonion.net/file/securityonion-repo/ubuntu/"$ubuntu_version"/amd64/salt/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
      echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
      echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"

      # Add Docker repo
      curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2351,7 +2351,7 @@ saltify() {
      set_progress_str 6 'Installing various dependencies'
      retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
      set_progress_str 7 'Installing salt-master'
      retry 50 10 "apt-get -y install salt-master=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
      retry 50 10 "apt-get -y install salt-master=3004.2+ds-1" >> "$setup_log" 2>&1 || exit 1
      retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
      ;;
    *)
@@ -2362,14 +2362,14 @@ saltify() {
      echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
      apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
      apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
      echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
      echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
      echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
      ;;
  esac

  retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
  set_progress_str 8 'Installing salt-minion & python modules'
  retry 50 10 "apt-get -y install salt-minion=3004.1+ds-1 salt-common=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
  retry 50 10 "apt-get -y install salt-minion=3004.2+ds-1 salt-common=3004.2+ds-1" >> "$setup_log" 2>&1 || exit 1
  retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
  retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1
fi
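All of these hunks are the same change: the Salt pin moves from 3004.1 to 3004.2 across the CentOS and Ubuntu install paths, with apt-mark hold and yum's --exclude=salt* keeping later updates from drifting off the pinned version. A post-install sanity check (not part of the commit, just standard tooling) could be:
```
sudo salt-call --versions-report | grep -i 'salt:'   # expect 3004.2
apt-cache policy salt-minion | head -3               # Ubuntu: shows the installed/held version
```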
@@ -1106,9 +1106,9 @@ if [[ $success != 0 ]]; then SO_ERROR=1; fi
# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox
# Ignore "Status .* was not found" due to output from salt http.query or http.wait_for_successful_query states used with retry
# Uncaught exception, closing connection|Exception in callback None - this is seen during influxdb / http.wait_for_successful_query state for ubuntu reinstall
if grep -E "ERROR|Result: False" $setup_log | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100" || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
if grep -E "ERROR|Result: False" $setup_log | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100|Running scope as unit" || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
  SO_ERROR=1
  grep --color=never "ERROR" "$setup_log" | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100" > "$error_log"
  grep --color=never "ERROR" "$setup_log" | grep -qvE "Status .* was not found|An exception occurred in this state|Uncaught exception, closing connection|Exception in callback None|deprecation: ERROR|code: 100|Running scope as unit" > "$error_log"
fi

if [[ -n $SO_ERROR ]]; then
BIN
sigs/securityonion-2.3.140-20220718.iso.sig
Normal file
Binary file not shown.
BIN
sigs/securityonion-2.3.140-20220719.iso.sig
Normal file
Binary file not shown.