Merge pull request #12353 from Security-Onion-Solutions/2.4/dev

2.4.50
This commit is contained in:
Mike Reeves
2024-02-20 10:04:01 -05:00
committed by GitHub
70 changed files with 247198 additions and 498 deletions

View File

@@ -536,11 +536,10 @@ secretGroup = 4
[allowlist] [allowlist]
description = "global allow lists" description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*'''] regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
paths = [ paths = [
'''gitleaks.toml''', '''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
'''(go.mod|go.sum)$''', '''(go.mod|go.sum)$''',
'''salt/nginx/files/enterprise-attack.json''' '''salt/nginx/files/enterprise-attack.json'''
] ]

View File

@@ -1,17 +1,17 @@
### 2.4.40-20240116 ISO image released on 2024/01/17 ### 2.4.50-20240220 ISO image released on 2024/02/20
### Download and Verify ### Download and Verify
2.4.40-20240116 ISO image: 2.4.50-20240220 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.40-20240116.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso
MD5: AC55D027B663F3CE0878FEBDAD9DD78B MD5: BCA6476EF1BF79773D8EFB11700FDE8E
SHA1: C2B51723B17F3DC843CC493EB80E93B123E3A3E1 SHA1: 9FF0A304AA368BCD2EF2BE89AD47E65650241927
SHA256: C5F135FCF45A836BBFF58C231F95E1EA0CD894898322187AD5FBFCD24BC2F123 SHA256: 49D7695EFFF6F3C4840079BF564F3191B585639816ADE98672A38017F25E9570
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.40-20240116.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.40-20240116.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.50-20240220.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.40-20240116.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.50-20240220.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.40-20240116.iso.sig securityonion-2.4.40-20240116.iso gpg --verify securityonion-2.4.50-20240220.iso.sig securityonion-2.4.50-20240220.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Tue 16 Jan 2024 07:34:40 PM EST using RSA key ID FE507013 gpg: Signature made Fri 16 Feb 2024 11:36:25 AM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.

View File

@@ -1 +1 @@
2.4.40 2.4.50

View File

@@ -41,7 +41,8 @@ file_roots:
base: base:
- /opt/so/saltstack/local/salt - /opt/so/saltstack/local/salt
- /opt/so/saltstack/default/salt - /opt/so/saltstack/default/salt
- /nsm/elastic-fleet/artifacts
- /opt/so/rules/nids
# The master_roots setting configures a master-only copy of the file_roots dictionary, # The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler. # used by the state compiler.

View File

@@ -65,6 +65,7 @@ base:
- soctopus.adv_soctopus - soctopus.adv_soctopus
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
'*_sensor': '*_sensor':
- healthcheck.sensor - healthcheck.sensor
@@ -80,6 +81,8 @@ base:
- suricata.adv_suricata - suricata.adv_suricata
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_eval': '*_eval':
- secrets - secrets
@@ -180,6 +183,7 @@ base:
- suricata.adv_suricata - suricata.adv_suricata
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
'*_heavynode': '*_heavynode':
- elasticsearch.auth - elasticsearch.auth
@@ -222,6 +226,8 @@ base:
- redis.adv_redis - redis.adv_redis
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_receiver': '*_receiver':
- logstash.nodes - logstash.nodes

View File

@@ -102,7 +102,8 @@
'utility', 'utility',
'schedule', 'schedule',
'soctopus', 'soctopus',
'docker_clean' 'docker_clean',
'stig'
], ],
'so-managersearch': [ 'so-managersearch': [
'salt.master', 'salt.master',
@@ -123,7 +124,8 @@
'utility', 'utility',
'schedule', 'schedule',
'soctopus', 'soctopus',
'docker_clean' 'docker_clean',
'stig'
], ],
'so-searchnode': [ 'so-searchnode': [
'ssl', 'ssl',
@@ -131,7 +133,8 @@
'telegraf', 'telegraf',
'firewall', 'firewall',
'schedule', 'schedule',
'docker_clean' 'docker_clean',
'stig'
], ],
'so-standalone': [ 'so-standalone': [
'salt.master', 'salt.master',
@@ -156,7 +159,8 @@
'schedule', 'schedule',
'soctopus', 'soctopus',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean',
'stig'
], ],
'so-sensor': [ 'so-sensor': [
'ssl', 'ssl',
@@ -168,13 +172,15 @@
'healthcheck', 'healthcheck',
'schedule', 'schedule',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean',
'stig'
], ],
'so-fleet': [ 'so-fleet': [
'ssl', 'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'logstash', 'logstash',
'nginx',
'healthcheck', 'healthcheck',
'schedule', 'schedule',
'elasticfleet', 'elasticfleet',

View File

@@ -4,7 +4,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
- common.soup_scripts
- common.packages - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %} {% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state - manager.elasticsearch # needed for elastic_curl_config state
@@ -134,6 +133,18 @@ common_sbin_jinja:
- file_mode: 755 - file_mode: 755
- template: jinja - template: jinja
{% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
# these two states remove the scripts from non manager nodes
remove_soup:
file.absent:
- name: /usr/sbin/soup
remove_so-firewall:
file.absent:
- name: /usr/sbin/so-firewall
{% endif %}
so-status_script: so-status_script:
file.managed: file.managed:
- name: /usr/sbin/so-status - name: /usr/sbin/so-status

View File

@@ -1,23 +1,70 @@
# Sync some Utilities {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
soup_scripts: {% if SOC_GLOBAL.global.airgap %}
file.recurse: {% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
- name: /usr/sbin {% else %}
- user: root {% set UPDATE_DIR='/tmp/sogh/securityonion' %}
- group: root {% endif %}
- file_mode: 755
- source: salt://common/tools/sbin
- include_pat:
- so-common
- so-image-common
soup_manager_scripts: remove_common_soup:
file.recurse: file.absent:
- name: /usr/sbin - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
- user: root
- group: root remove_common_so-firewall:
- file_mode: 755 file.absent:
- source: salt://manager/tools/sbin - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
- include_pat:
- so-firewall copy_so-common_common_tools_sbin:
- so-repo-sync file.copy:
- soup - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-common_sbin:
file.copy:
- name: /usr/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_sbin:
file.copy:
- name: /usr/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_sbin:
file.copy:
- name: /usr/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_sbin:
file.copy:
- name: /usr/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True

View File

@@ -366,6 +366,13 @@ is_feature_enabled() {
return 1 return 1
} }
read_feat() {
if [ -f /opt/so/log/sostatus/lks_enabled ]; then
lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}')
echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)"
fi
}
require_manager() { require_manager() {
if is_manager_node; then if is_manager_node; then
echo "This is a manager, so we can proceed." echo "This is a manager, so we can proceed."
@@ -559,6 +566,14 @@ status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n" printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
} }
sync_options() {
set_version
set_os
salt_minion_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)"
}
systemctl_func() { systemctl_func() {
local action=$1 local action=$1
local echo_action=$1 local echo_action=$1

View File

@@ -8,6 +8,7 @@
import sys import sys
import subprocess import subprocess
import os import os
import json
sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/') sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/')
import salt.config import salt.config
@@ -36,17 +37,63 @@ def check_needs_restarted():
with open(outfile, 'w') as f: with open(outfile, 'w') as f:
f.write(val) f.write(val)
def check_for_fps():
feat = 'fps'
feat_full = feat.replace('ps', 'ips')
fps = 0
try:
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0:
fps = 1
except FileNotFoundError:
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
with open(fn, 'r') as f:
contents = f.read()
if '1' in contents:
fps = 1
with open('/opt/so/log/sostatus/lks_enabled', 'w') as f:
f.write(str(fps))
def check_for_lks():
feat = 'Lks'
feat_full = feat.replace('ks', 'uks')
lks = 0
result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE)
data = json.loads(result.stdout)
for device in data['blockdevices']:
if 'children' in device:
for gc in device['children']:
if 'children' in gc:
try:
arg = 'is' + feat_full
result = subprocess.run(['cryptsetup', arg, gc['name']], stdout=subprocess.PIPE)
if result.returncode == 0:
lks = 1
except FileNotFoundError:
for ggc in gc['children']:
if 'crypt' in ggc['type']:
lks = 1
if lks:
break
with open('/opt/so/log/sostatus/fps_enabled', 'w') as f:
f.write(str(lks))
def fail(msg): def fail(msg):
print(msg, file=sys.stderr) print(msg, file=sys.stderr)
sys.exit(1) sys.exit(1)
def main(): def main():
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8") proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0": if proc.stdout.strip() != "0":
fail("This program must be run as root") fail("This program must be run as root")
# Ensure that umask is 0022 so that files created by this script have rw-r-r permissions
org_umask = os.umask(0o022)
check_needs_restarted() check_needs_restarted()
check_for_fps()
check_for_lks()
# Restore umask to whatever value was set before this script was run. SXIG sets to 0077 rw---
os.umask(org_umask)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@@ -334,6 +334,7 @@ desktop_packages:
- pulseaudio-libs - pulseaudio-libs
- pulseaudio-libs-glib2 - pulseaudio-libs-glib2
- pulseaudio-utils - pulseaudio-utils
- putty
- sane-airscan - sane-airscan
- sane-backends - sane-backends
- sane-backends-drivers-cameras - sane-backends-drivers-cameras

View File

@@ -84,6 +84,13 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-nginx-fleet-node':
final_octet: 31
port_bindings:
- 8443:8443
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-playbook': 'so-playbook':
final_octet: 32 final_octet: 32
port_bindings: port_bindings:

View File

@@ -48,6 +48,7 @@ docker:
so-logstash: *dockerOptions so-logstash: *dockerOptions
so-mysql: *dockerOptions so-mysql: *dockerOptions
so-nginx: *dockerOptions so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions
so-playbook: *dockerOptions so-playbook: *dockerOptions
so-redis: *dockerOptions so-redis: *dockerOptions
so-sensoroni: *dockerOptions so-sensoroni: *dockerOptions

View File

@@ -45,6 +45,8 @@ elasticfleet:
- cisco_ise - cisco_ise
- cisco_meraki - cisco_meraki
- cisco_umbrella - cisco_umbrella
- citrix_adc
- citrix_waf
- cloudflare - cloudflare
- crowdstrike - crowdstrike
- darktrace - darktrace
@@ -75,6 +77,7 @@ elasticfleet:
- mimecast - mimecast
- mysql - mysql
- netflow - netflow
- nginx
- o365 - o365
- okta - okta
- osquery_manager - osquery_manager
@@ -103,6 +106,7 @@ elasticfleet:
- udp - udp
- vsphere - vsphere
- windows - windows
- winlog
- zscaler_zia - zscaler_zia
- zscaler_zpa - zscaler_zpa
- 1password - 1password

View File

@@ -17,6 +17,11 @@ include:
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl - ssl
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
# If enabled, automatically update Fleet Logstash Outputs # If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %} {% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs: so-elastic-fleet-auto-configure-logstash-outputs:
@@ -33,12 +38,26 @@ so-elastic-fleet-auto-configure-server-urls:
- retry: True - retry: True
{% endif %} {% endif %}
# Automatically update Fleet Server Elasticsearch URLs # Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
{% if grains.role not in ['so-fleet'] %} {% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls: so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update - name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: True - retry: True
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: True
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats
- source: salt://beats
{% endif %} {% endif %}
{% if SERVICETOKEN != '' %} {% if SERVICETOKEN != '' %}

View File

@@ -0,0 +1,34 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/rita/beacons.csv",
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "rita",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
}
}
}
}
}
}

View File

@@ -1,3 +1,5 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.

View File

@@ -0,0 +1,90 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
# Only run on Managers
if ! is_manager_node; then
printf "Not a Manager Node... Exiting"
exit 0
fi
# Function to check if an array contains a value
array_contains () {
local array="$1[@]"
local seeking=$2
local in=1
for element in "${!array}"; do
if [[ $element == "$seeking" ]]; then
in=0
break
fi
done
return $in
}
# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'
# Initialize an array for new hosts from Fleet Nodes
declare -a NEW_LIST=()
# Query for Fleet Nodes & add them to the list (Hostname)
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
for NODE in "${FLEETNODES[@]}"; do
URL="http://$NODE:8443/artifacts/"
NAME="FleetServer_$NODE"
NEW_LIST+=("$URL=$NAME")
done
fi
# Create an array for expected hosts and their names
declare -A expected_urls=(
["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
)
# Merge NEW_LIST into expected_urls
for entry in "${NEW_LIST[@]}"; do
# Extract URL and Name from each entry
IFS='=' read -r URL NAME <<< "$entry"
# Add to expected_urls, automatically handling URL as key and NAME as value
expected_urls["$URL"]="$NAME"
done
# Fetch the current hosts from the API
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)
# Convert current hosts to an array
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"
# Flag to track if any host was added
any_url_added=0
# Check each expected host
for host in "${!expected_urls[@]}"; do
array_contains current_urls_array "$host" || {
echo "$host (${expected_urls[$host]}) is missing. Adding it..."
# Prepare the JSON payload
JSON_STRING=$( jq -n \
--arg NAME "${expected_urls[$host]}" \
--arg URL "$host" \
'{"name":$NAME,"host":$URL}' )
# Create the missing host
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Flag that an artifact URL was added
any_url_added=1
}
done
if [[ $any_url_added -eq 0 ]]; then
echo "All expected artifact URLs are present. No updates needed."
fi

View File

@@ -1,3 +1,5 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.

View File

@@ -1,3 +1,5 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.

View File

@@ -1,3 +1,5 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.

View File

@@ -118,6 +118,19 @@ esingestconf:
- user: 930 - user: 930
- group: 939 - group: 939
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
file.managed:
- name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ config }}
- template: jinja
- onchanges_in:
- file: so-pipelines-reload
{% endfor %}
eslog4jfile: eslog4jfile:
file.managed: file.managed:
- name: /opt/so/conf/elasticsearch/log4j2.properties - name: /opt/so/conf/elasticsearch/log4j2.properties

View File

@@ -55,6 +55,87 @@ elasticsearch:
key: /usr/share/elasticsearch/config/elasticsearch.key key: /usr/share/elasticsearch/config/elasticsearch.key
verification_mode: none verification_mode: none
enabled: false enabled: false
pipelines:
custom001:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom001
- pipeline:
name: common
custom002:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom002
- pipeline:
name: common
custom003:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom003
- pipeline:
name: common
custom004:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom004
- pipeline:
name: common
custom005:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom005
- pipeline:
name: common
custom006:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom006
- pipeline:
name: common
custom007:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom007
- pipeline:
name: common
custom008:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom008
- pipeline:
name: common
custom009:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom009
- pipeline:
name: common
custom010:
description: Custom Pipeline
processors:
- set:
field: tags
value: custom010
- pipeline:
name: common
index_settings: index_settings:
global_overrides: global_overrides:
index_template: index_template:
@@ -2537,6 +2618,270 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-citrix_adc_x_interface:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_adc.interface-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_adc.interface-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_adc.interface@package"
- "logs-citrix_adc.interface@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-citrix_adc_x_lbvserver:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_adc.lbvserver-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_adc.lbvserver-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_adc.lbvserver@package"
- "logs-citrix_adc.lbvserver@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-citrix_adc_x_service:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_adc.service-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_adc.service-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_adc.service@package"
- "logs-citrix_adc.service@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-citrix_adc_x_system:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_adc.system-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_adc.system-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_adc.system@package"
- "logs-citrix_adc.system@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-citrix_adc_x_vpn:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_adc.vpn-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_adc.vpn-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_adc.vpn@package"
- "logs-citrix_adc.vpn@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-citrix_waf_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-citrix_waf.log-*"
template:
settings:
index:
lifecycle:
name: so-logs-citrix_waf.log-logs
number_of_replicas: 0
composed_of:
- "logs-citrix_waf.log@package"
- "logs-citrix_waf.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-cloudflare_x_audit: so-logs-cloudflare_x_audit:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -3539,6 +3884,62 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-endpoint_x_diagnostic_x_collection:
index_sorting: false
index_template:
composed_of:
- event-mappings
- logs-endpoint.diagnostic.collection@custom
- logs-endpoint.diagnostic.collection@package
- so-fleet_globals-1
- so-fleet_agent_id_verification-1
data_stream:
allow_custom_routing: false
hidden: false
index_patterns:
- logs-endpoint.diagnostic.collection-*
priority: 501
template:
settings:
index:
lifecycle:
name: so-logs-endpoint.diagnostic.collection-logs
mapping:
total_fields:
limit: 5000
number_of_replicas: 0
sort:
field: '@timestamp'
order: desc
policy:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-endpoint_x_events_x_api: so-logs-endpoint_x_events_x_api:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -6659,6 +7060,138 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-nginx_x_access:
index_sorting: False
index_template:
index_patterns:
- "logs-nginx.access-*"
template:
settings:
index:
lifecycle:
name: so-logs-nginx.access-logs
number_of_replicas: 0
composed_of:
- "logs-nginx.access@package"
- "logs-nginx.access@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-nginx_x_error:
index_sorting: False
index_template:
index_patterns:
- "logs-nginx.error-*"
template:
settings:
index:
lifecycle:
name: so-logs-nginx.error-logs
number_of_replicas: 0
composed_of:
- "logs-nginx.error@package"
- "logs-nginx.error@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-metrics-nginx_x_stubstatus:
index_sorting: False
index_template:
index_patterns:
- "metrics-nginx.stubstatus-*"
template:
settings:
index:
lifecycle:
name: so-metrics-nginx.stubstatus-logs
number_of_replicas: 0
composed_of:
- "metrics-nginx.stubstatus@package"
- "metrics-nginx.stubstatus@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-o365_x_audit: so-logs-o365_x_audit:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -8854,6 +9387,50 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-winlog_x_winlog:
index_sorting: False
index_template:
index_patterns:
- "logs-winlog.winlog-*"
template:
settings:
index:
lifecycle:
name: so-logs-winlog.winlog-logs
number_of_replicas: 0
composed_of:
- "logs-winlog.winlog@package"
- "logs-winlog.winlog@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-zscaler_zia_x_alerts: so-logs-zscaler_zia_x_alerts:
index_sorting: false index_sorting: false
index_template: index_template:

View File

@@ -67,7 +67,8 @@
{ "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } }, { "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }}, { "set": { "field": "observer.name", "value": "{{agent.name}}" }},
{ "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }}, { "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } }, { "convert" : { "field" : "scan.pe.flags","type": "string", "ignore_missing":true }},
{ "pipeline": { "name": "common" } } { "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
] ]
} }

View File

@@ -4,6 +4,7 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } }, { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } }, { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } }, { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } }, { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
{ "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } }, { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
@@ -12,6 +13,7 @@
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } }, { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } }, { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } }, { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
{ "lowercase": { "field": "network.transport", "ignore_failure": true } },
{ "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } }, { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } }, { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },

View File

@@ -0,0 +1,21 @@
{
"description" : "suricata.ike",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_auth", "target_field": "ike.algorithm.authentication", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_enc", "target_field": "ike.algorithm.encryption", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_esn", "target_field": "ike.algorithm.esn", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_dh", "target_field": "ike.algorithm.dh", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_prf", "target_field": "ike.algorithm.prf", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.exchange_type", "target_field": "ike.exchange_type", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.payload", "target_field": "ike.payload", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.role", "target_field": "ike.role", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.init_spi", "target_field": "ike.spi.initiator", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.resp_spi", "target_field": "ike.spi.responder", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_major", "target_field": "ike.version.major", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_minor", "target_field": "ike.version.minor", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.ikev2.errors", "target_field": "ike.ikev2.errors", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -1,8 +0,0 @@
{
"description" : "suricata.ikev2",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -45,6 +45,28 @@ elasticsearch:
description: Max number of boolean clauses per query. description: Max number of boolean clauses per query.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
pipelines:
custom001: &pipelines
description:
description: Description of the ingest node pipeline
global: True
advanced: True
helpLink: elasticsearch.html
processors:
description: Processors for the ingest node pipeline
global: True
advanced: True
multiline: True
helpLink: elasticsearch.html
custom002: *pipelines
custom003: *pipelines
custom004: *pipelines
custom005: *pipelines
custom006: *pipelines
custom007: *pipelines
custom008: *pipelines
custom009: *pipelines
custom010: *pipelines
index_settings: index_settings:
global_overrides: global_overrides:
index_template: index_template:
@@ -318,6 +340,7 @@ elasticsearch:
so-logs-windows_x_powershell: *indexSettings so-logs-windows_x_powershell: *indexSettings
so-logs-windows_x_powershell_operational: *indexSettings so-logs-windows_x_powershell_operational: *indexSettings
so-logs-windows_x_sysmon_operational: *indexSettings so-logs-windows_x_sysmon_operational: *indexSettings
so-logs-winlog_x_winlog: *indexSettings
so-logs-apache_x_access: *indexSettings so-logs-apache_x_access: *indexSettings
so-logs-apache_x_error: *indexSettings so-logs-apache_x_error: *indexSettings
so-logs-auditd_x_log: *indexSettings so-logs-auditd_x_log: *indexSettings
@@ -346,6 +369,12 @@ elasticsearch:
so-logs-cisco_ftd_x_log: *indexSettings so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings so-logs-cisco_ios_x_log: *indexSettings
so-logs-cisco_ise_x_log: *indexSettings so-logs-cisco_ise_x_log: *indexSettings
so-logs-citrix_adc_x_interface: *indexSettings
so-logs-citrix_adc_x_lbvserver: *indexSettings
so-logs-citrix_adc_x_service: *indexSettings
so-logs-citrix_adc_x_system: *indexSettings
so-logs-citrix_adc_x_vpn: *indexSettings
so-logs-citrix_waf_x_log: *indexSettings
so-logs-cloudflare_x_audit: *indexSettings so-logs-cloudflare_x_audit: *indexSettings
so-logs-cloudflare_x_logpull: *indexSettings so-logs-cloudflare_x_logpull: *indexSettings
so-logs-crowdstrike_x_falcon: *indexSettings so-logs-crowdstrike_x_falcon: *indexSettings
@@ -406,6 +435,8 @@ elasticsearch:
so-logs-mysql_x_error: *indexSettings so-logs-mysql_x_error: *indexSettings
so-logs-mysql_x_slowlog: *indexSettings so-logs-mysql_x_slowlog: *indexSettings
so-logs-netflow_x_log: *indexSettings so-logs-netflow_x_log: *indexSettings
so-logs-nginx_x_access: *indexSettings
so-logs-nginx_x_error: *indexSettings
so-logs-o365_x_audit: *indexSettings so-logs-o365_x_audit: *indexSettings
so-logs-okta_x_system: *indexSettings so-logs-okta_x_system: *indexSettings
so-logs-panw_x_panos: *indexSettings so-logs-panw_x_panos: *indexSettings
@@ -471,6 +502,7 @@ elasticsearch:
so-metrics-endpoint_x_metadata: *indexSettings so-metrics-endpoint_x_metadata: *indexSettings
so-metrics-endpoint_x_metrics: *indexSettings so-metrics-endpoint_x_metrics: *indexSettings
so-metrics-endpoint_x_policy: *indexSettings so-metrics-endpoint_x_policy: *indexSettings
so-metrics-nginx_x_stubstatus: *indexSettings
so-case: *indexSettings so-case: *indexSettings
so-common: *indexSettings so-common: *indexSettings
so-endgame: *indexSettings so-endgame: *indexSettings

View File

@@ -1,382 +1,383 @@
{"template": { {
"settings": { "template": {
"index": { "settings": {
"lifecycle": { "index": {
"name": "logs" "lifecycle": {
}, "name": "logs"
"codec": "best_compression", },
"default_pipeline": "logs-elastic_agent-1.13.1", "codec": "best_compression",
"mapping": { "default_pipeline": "logs-elastic_agent-1.13.1",
"total_fields": { "mapping": {
"limit": "10000" "total_fields": {
"limit": "10000"
}
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
}
}
},
"mappings": {
"dynamic": false,
"dynamic_templates": [
{
"container.labels": {
"path_match": "container.labels.*",
"mapping": {
"type": "keyword"
},
"match_mapping_type": "string"
}
}
],
"properties": {
"container": {
"properties": {
"image": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
} }
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
} }
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
} }
}, }
"mappings": { },
"dynamic": false, "agent": {
"dynamic_templates": [ "properties": {
{ "build": {
"container.labels": { "properties": {
"path_match": "container.labels.*", "original": {
"mapping": { "ignore_above": 1024,
"type": "keyword" "type": "keyword"
},
"match_mapping_type": "string"
} }
} }
], },
"properties": { "name": {
"container": { "ignore_above": 1024,
"properties": { "type": "keyword"
"image": { },
"properties": { "id": {
"name": { "ignore_above": 1024,
"ignore_above": 1024, "type": "keyword"
"type": "keyword" },
} "ephemeral_id": {
} "ignore_above": 1024,
}, "type": "keyword"
"name": { },
"ignore_above": 1024, "type": {
"type": "keyword" "ignore_above": 1024,
}, "type": "keyword"
"id": { },
"ignore_above": 1024, "version": {
"type": "keyword" "ignore_above": 1024,
} "type": "keyword"
}
}
},
"log": {
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"elastic_agent": {
"properties": {
"process": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"snapshot": {
"type": "boolean"
}
}
},
"message": {
"type": "text"
},
"cloud": {
"properties": {
"availability_zone": {
"ignore_above": 1024,
"type": "keyword"
},
"image": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
} }
}, }
"agent": { },
"properties": { "instance": {
"build": { "properties": {
"properties": { "name": {
"original": { "ignore_above": 1024,
"ignore_above": 1024, "type": "keyword"
"type": "keyword" },
} "id": {
} "ignore_above": 1024,
}, "type": "keyword"
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
} }
}, }
"log": { },
"properties": { "provider": {
"level": { "ignore_above": 1024,
"ignore_above": 1024, "type": "keyword"
"type": "keyword" },
} "machine": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
} }
}, }
"elastic_agent": { },
"properties": { "project": {
"process": { "properties": {
"ignore_above": 1024, "id": {
"type": "keyword" "ignore_above": 1024,
}, "type": "keyword"
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"snapshot": {
"type": "boolean"
}
} }
}, }
"message": { },
"type": "text" "region": {
}, "ignore_above": 1024,
"cloud": { "type": "keyword"
"properties": { },
"availability_zone": { "account": {
"ignore_above": 1024, "properties": {
"type": "keyword" "id": {
}, "ignore_above": 1024,
"image": { "type": "keyword"
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"instance": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"machine": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"project": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"region": {
"ignore_above": 1024,
"type": "keyword"
},
"account": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
},
"component": {
"properties": {
"binary": {
"ignore_above": 1024,
"type": "keyword"
},
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"unit": {
"properties": {
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"host": {
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"os": {
"properties": {
"build": {
"ignore_above": 1024,
"type": "keyword"
},
"kernel": {
"ignore_above": 1024,
"type": "keyword"
},
"codename": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"text": {
"type": "text"
}
}
},
"family": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"platform": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"domain": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "ip"
},
"containerized": {
"type": "boolean"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"mac": {
"ignore_above": 1024,
"type": "keyword"
},
"architecture": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event": {
"properties": {
"dataset": {
"type": "constant_keyword"
}
} }
} }
} }
} }
}, },
"_meta": { "component": {
"package": { "properties": {
"name": "elastic_agent" "binary": {
}, "ignore_above": 1024,
"managed_by": "fleet", "type": "keyword"
"managed": true },
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"unit": {
"properties": {
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"host": {
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"os": {
"properties": {
"build": {
"ignore_above": 1024,
"type": "keyword"
},
"kernel": {
"ignore_above": 1024,
"type": "keyword"
},
"codename": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"text": {
"type": "text"
}
}
},
"family": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"platform": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"domain": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "ip"
},
"containerized": {
"type": "boolean"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"mac": {
"ignore_above": 1024,
"type": "keyword"
},
"architecture": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event": {
"properties": {
"dataset": {
"type": "constant_keyword"
}
}
} }
} }
}
},
"_meta": {
"package": {
"name": "elastic_agent"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -0,0 +1,12 @@
{
"template": {
"settings": {}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -0,0 +1,132 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs-endpoint.collection-diagnostic"
},
"codec": "best_compression",
"default_pipeline": "logs-endpoint.diagnostic.collection-8.10.2",
"mapping": {
"total_fields": {
"limit": "10000"
},
"ignore_malformed": "true"
},
"query": {
"default_field": [
"ecs.version",
"event.action",
"event.category",
"event.code",
"event.dataset",
"event.hash",
"event.id",
"event.kind",
"event.module",
"event.outcome",
"event.provider",
"event.type"
]
}
}
},
"mappings": {
"dynamic": false,
"properties": {
"@timestamp": {
"ignore_malformed": false,
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"event": {
"properties": {
"severity": {
"type": "long"
},
"code": {
"ignore_above": 1024,
"type": "keyword"
},
"created": {
"type": "date"
},
"kind": {
"ignore_above": 1024,
"type": "keyword"
},
"module": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"sequence": {
"type": "long"
},
"ingested": {
"type": "date"
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"action": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"dataset": {
"ignore_above": 1024,
"type": "keyword"
},
"hash": {
"ignore_above": 1024,
"type": "keyword"
},
"outcome": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -14,16 +14,19 @@
}, },
"pe": { "pe": {
"properties": { "properties": {
"sections": { "flags": {
"type": "text"
},
"image_version": {
"type": "float"
},
"sections": {
"properties": { "properties": {
"entropy": { "entropy": {
"type": "float" "type": "float"
} }
} }
}, }
"image_version": {
"type": "float"
}
} }
}, },
"elf": { "elf": {

View File

@@ -95,6 +95,7 @@
{% set NODE_CONTAINERS = [ {% set NODE_CONTAINERS = [
'so-elastic-fleet', 'so-elastic-fleet',
'so-logstash', 'so-logstash',
'so-nginx-fleet-node'
] %} ] %}
{% elif GLOBALS.role == 'so-sensor' %} {% elif GLOBALS.role == 'so-sensor' %}

View File

@@ -39,7 +39,7 @@ so-idstools:
{% endif %} {% endif %}
- binds: - binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw - /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw
- /nsm/rules/:/nsm/rules/:rw - /nsm/rules/:/nsm/rules/:rw
{% if DOCKER.containers['so-idstools'].custom_bind_mounts %} {% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}

View File

@@ -1,10 +1,10 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%} {%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%} {%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
--merged=/opt/so/rules/nids/all.rules --merged=/opt/so/rules/nids/suri/all.rules
--local=/opt/so/rules/nids/local.rules --local=/opt/so/rules/nids/suri/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %} {%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/extraction.rules --local=/opt/so/rules/nids/suri/extraction.rules
--local=/opt/so/rules/nids/filters.rules --local=/opt/so/rules/nids/suri/filters.rules
{%- endif %} {%- endif %}
--url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules --url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
--disable=/opt/so/idstools/etc/disable.conf --disable=/opt/so/idstools/etc/disable.conf

View File

@@ -21,7 +21,7 @@ idstoolsetcsync:
rulesdir: rulesdir:
file.directory: file.directory:
- name: /opt/so/rules/nids - name: /opt/so/rules/nids/suri
- user: 939 - user: 939
- group: 939 - group: 939
- makedirs: True - makedirs: True
@@ -29,7 +29,7 @@ rulesdir:
# Don't show changes because all.rules can be large # Don't show changes because all.rules can be large
synclocalnidsrules: synclocalnidsrules:
file.recurse: file.recurse:
- name: /opt/so/rules/nids/ - name: /opt/so/rules/nids/suri/
- source: salt://idstools/rules/ - source: salt://idstools/rules/
- user: 939 - user: 939
- group: 939 - group: 939

View File

@@ -21,7 +21,7 @@
{% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %} {% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %}
{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %} {% if KRATOSMERGED.oidc.enabled and 'odc' in salt['pillar.get']('features') %}
{% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %} {% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %}
{% endif %} {% endif %}

View File

@@ -63,6 +63,20 @@ lspipelinedir:
- user: 931 - user: 931
- group: 939 - group: 939
# Auto-generate Logstash pipeline config
{% for pipeline, config in LOGSTASH_MERGED.pipeline_config.items() %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% set custom_pipeline = 'custom/' + pipeline + '.conf' %}
{% if custom_pipeline in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_custom_pipeline_conf_{{assigned_pipeline}}_{{pipeline}}:
file.managed:
- name: /opt/so/conf/logstash/pipelines/{{assigned_pipeline}}/{{ pipeline }}.conf
- contents: LOGSTASH_MERGED.pipeline_config.{{pipeline}}
{% endif %}
{% endfor %}
{% endfor %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %} {% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %} {% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:

View File

@@ -42,6 +42,24 @@ logstash:
custom2: [] custom2: []
custom3: [] custom3: []
custom4: [] custom4: []
pipeline_config:
custom001: |-
filter {
if [event][module] =~ "zeek" {
mutate {
add_tag => ["network_stuff"]
}
}
}
custom002: PLACEHOLDER
custom003: PLACEHOLDER
custom004: PLACEHOLDER
custom005: PLACEHOLDER
custom006: PLACEHOLDER
custom007: PLACEHOLDER
custom008: PLACEHOLDER
custom009: PLACEHOLDER
custom010: PLACEHOLDER
settings: settings:
lsheap: 500m lsheap: 500m
config: config:

View File

@@ -31,6 +31,22 @@ logstash:
custom2: *defined_pipelines custom2: *defined_pipelines
custom3: *defined_pipelines custom3: *defined_pipelines
custom4: *defined_pipelines custom4: *defined_pipelines
pipeline_config:
custom001: &pipeline_config
description: Pipeline configuration for Logstash
advanced: True
multiline: True
forcedType: string
helpLink: logstash.html
custom002: *pipeline_config
custom003: *pipeline_config
custom004: *pipeline_config
custom005: *pipeline_config
custom006: *pipeline_config
custom007: *pipeline_config
custom008: *pipeline_config
custom009: *pipeline_config
custom010: *pipeline_config
settings: settings:
lsheap: lsheap:
description: Heap size to use for logstash description: Heap size to use for logstash

View File

@@ -7,12 +7,8 @@
NOROOT=1 NOROOT=1
. /usr/sbin/so-common . /usr/sbin/so-common
set_version
set_os
salt_minion_count
set -e set -e
curl --retry 5 --retry-delay 60 -A "reposync/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup curl --retry 5 --retry-delay 60 -A "reposync/$(sync_options)" https://sigs.securityonion.net/checkup --output /tmp/checkup
dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/ dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/
createrepo /nsm/repo createrepo /nsm/repo

View File

@@ -347,7 +347,7 @@ function syncElastic() {
[[ $? != 0 ]] && fail "Unable to read credential hashes from database" [[ $? != 0 ]] && fail "Unable to read credential hashes from database"
user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password') user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then if lookup_salt_value "features" "" "pillar" | grep -x odc; then
# generate random placeholder salt/hash for users without passwords # generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53) random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/") user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")

View File

@@ -16,12 +16,14 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args): def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0])) print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:') print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' help - Prints this usage information.') print(' help - Prints this usage information.')
print('') print('')
print(' Where:') print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml') print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' LISTITEM - Item to add to the list.')
sys.exit(1) sys.exit(1)
@@ -35,6 +37,35 @@ def writeYaml(filename, content):
file = open(filename, "w") file = open(filename, "w")
return yaml.dump(content, file) return yaml.dump(content, file)
def appendItem(content, key, listItem):
pieces = key.split(".", 1)
if len(pieces) > 1:
appendItem(content[pieces[0]], pieces[1], listItem)
else:
try:
content[key].append(listItem)
except AttributeError:
print("The existing value for the given key is not a list. No action was taken on the file.")
return 1
except KeyError:
print("The key provided does not exist. No action was taken on the file.")
return 1
def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
listItem = args[2]
content = loadYaml(filename)
appendItem(content, key, listItem)
writeYaml(filename, content)
return 0
def removeKey(content, key): def removeKey(content, key):
pieces = key.split(".", 1) pieces = key.split(".", 1)
@@ -69,6 +100,7 @@ def main():
commands = { commands = {
"help": showUsage, "help": showUsage,
"append": append,
"remove": remove, "remove": remove,
} }

View File

@@ -105,3 +105,99 @@ class TestRemove(unittest.TestCase):
self.assertEqual(actual, expected) self.assertEqual(actual, expected)
sysmock.assert_called_once_with(1) sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.append([filename, "key3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\n- d\n"
self.assertEqual(actual, expected)
def test_append_nested(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.append([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_append_nested_deep(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.append([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_append_key_noexist(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key4", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
def test_append_key_noexist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
def test_append_key_nonlist(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_append_key_nonlist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")

View File

@@ -372,6 +372,17 @@ enable_highstate() {
echo "" echo ""
} }
get_soup_script_hashes() {
  # Record md5 hashes of the installed soup-related scripts and of the
  # freshly-pulled copies under $UPDATE_DIR so callers (e.g.
  # verify_latest_update_script) can detect stale installed scripts.
  # Sets globals: CURRENTSOUP/GITSOUP, CURRENTCMN/GITCMN,
  # CURRENTIMGCMN/GITIMGCMN, CURRENTSOFIREWALL/GITSOFIREWALL.
  # Quote $UPDATE_DIR so paths with spaces don't word-split.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum "$UPDATE_DIR/salt/manager/tools/sbin/soup" | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum "$UPDATE_DIR/salt/common/tools/sbin/so-common" | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum "$UPDATE_DIR/salt/common/tools/sbin/so-image-common" | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum "$UPDATE_DIR/salt/manager/tools/sbin/so-firewall" | awk '{print $1}')
}
highstate() { highstate() {
# Run a highstate. # Run a highstate.
salt-call state.highstate -l info queue=True salt-call state.highstate -l info queue=True
@@ -405,6 +416,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20 [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30 [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
[[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40 [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
true true
} }
@@ -419,6 +431,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20 [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30 [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40 [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
true true
} }
@@ -470,6 +483,11 @@ post_to_2.4.40() {
POSTVERSION=2.4.40 POSTVERSION=2.4.40
} }
post_to_2.4.50() {
  # No post-upgrade migrations are needed for 2.4.50; report that and
  # advance the post-upgrade version marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.50
}
repo_sync() { repo_sync() {
echo "Sync the local repo." echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -570,6 +588,35 @@ up_to_2.4.40() {
INSTALLEDVERSION=2.4.40 INSTALLEDVERSION=2.4.40
} }
up_to_2.4.50() {
  # Pre-upgrade migration from 2.4.40 to 2.4.50: seed STIG pillar files,
  # install the new so-yaml, extend the salt-master file_roots, and move
  # Suricata rules into their own subdirectory.
  echo "Creating additional pillars.."
  # Seed empty STIG pillars so states that include them can render.
  mkdir -p /opt/so/saltstack/local/pillar/stig/
  touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
  touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls
  # the file_roots need to be updated due to the salt 3006.6 upgrade not allowing symlinks outside the file_roots
  # put new so-yaml in place (\cp bypasses any cp alias so it never prompts)
  echo "Updating so-yaml"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
  echo "Creating a backup of the salt-master config."
  # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to have the version
  # so was at prior to starting upgrade. use POSTVERSION here since it doesnt change until
  # post upgrade changes. POSTVERSION set to INSTALLEDVERSION at start of soup
  cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
  echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
  echo "Moving Suricata rules"
  # NOTE(review): bare mkdir (no -p) fails if the directory already exists,
  # e.g. on a re-run after a partial upgrade — confirm soup never re-enters here.
  mkdir /opt/so/rules/nids/suri
  chown socore:socore /opt/so/rules/nids/suri
  mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.
  echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts
  INSTALLEDVERSION=2.4.50
}
determine_elastic_agent_upgrade() { determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap update_elastic_agent_airgap
@@ -742,31 +789,29 @@ upgrade_salt() {
} }
verify_latest_update_script() { verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one. get_soup_script_hashes
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the soup script is up to date. Proceeding." echo "This version of the soup script is up to date. Proceeding."
else else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
# Verify that soup scripts updated as expected
get_soup_script_hashes
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "Succesfully updated soup scripts."
else
echo "There was a problem updating soup scripts. Trying to rerun script update."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
fi
echo "" echo ""
echo "The soup script has been modified. Please run soup again to continue the upgrade." echo "The soup script has been modified. Please run soup again to continue the upgrade."
exit 0 exit 0
fi fi
}
}
# Keeping this block in case we need to do a hotfix that requires salt update # Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() { apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -909,9 +954,6 @@ main() {
systemctl_func "stop" "$cron_service_name" systemctl_func "stop" "$cron_service_name"
# update mine items prior to stopping salt-minion and salt-master
update_salt_mine
echo "Updating dockers to $NEWVERSION." echo "Updating dockers to $NEWVERSION."
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
airgap_update_dockers airgap_update_dockers
@@ -987,6 +1029,9 @@ main() {
salt-call state.apply salt.minion -l info queue=True salt-call state.apply salt.minion -l info queue=True
echo "" echo ""
# ensure the mine is updated and populated before highstates run, following the salt-master restart
update_salt_mine
enable_highstate enable_highstate
echo "" echo ""

View File

@@ -14,6 +14,9 @@ include:
- nginx.config - nginx.config
- nginx.sostatus - nginx.sostatus
{% if grains.role not in ['so-fleet'] %}
{# if the user has selected to replace the crt and key in the ui #} {# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %} {% if NGINXMERGED.ssl.replace_cert %}
@@ -88,6 +91,15 @@ make-rule-dir-nginx:
- recurse: - recurse:
- user - user
- group - group
{% endif %}
{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% if GLOBALS.role == 'so-fleet' %}
{% set container_config = 'so-nginx-fleet-node' %}
{% else %}
{% set container_config = 'so-nginx' %}
{% endif %}
so-nginx: so-nginx:
docker_container.running: docker_container.running:
@@ -95,11 +107,11 @@ so-nginx:
- hostname: so-nginx - hostname: so-nginx
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKER.containers['so-nginx'].ip }} - ipv4_address: {{ DOCKER.containers[container_config].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-nginx'].extra_hosts %} {% if DOCKER.containers[container_config].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %} {% for XTRAHOST in DOCKER.containers[container_config].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -119,20 +131,20 @@ so-nginx:
- /nsm/repo:/opt/socore/html/repo:ro - /nsm/repo:/opt/socore/html/repo:ro
- /nsm/rules:/nsm/rules:ro - /nsm/rules:/nsm/rules:ro
{% endif %} {% endif %}
{% if DOCKER.containers['so-nginx'].custom_bind_mounts %} {% if DOCKER.containers[container_config].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %} {% for BIND in DOCKER.containers[container_config].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKER.containers['so-nginx'].extra_env %} {% if DOCKER.containers[container_config].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %} {% for XTRAENV in DOCKER.containers[container_config].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- cap_add: NET_BIND_SERVICE - cap_add: NET_BIND_SERVICE
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-nginx'].port_bindings %} {% for BINDING in DOCKER.containers[container_config].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- watch: - watch:

View File

@@ -39,6 +39,26 @@ http {
include /etc/nginx/conf.d/*.conf; include /etc/nginx/conf.d/*.conf;
{%- if role in ['fleet'] %}
    # Fleet nodes: serve Elastic Agent artifacts from local disk on port 8443.
    server {
        listen 8443;
        server_name {{ GLOBALS.hostname }};
        root /opt/socore/html;
        location /artifacts/ {
            # NOTE(review): "=206" makes nginx answer with HTTP 206 when the
            # file is missing — presumably for Elastic Agent ranged downloads;
            # the conventional fallback is =404. Confirm intent.
            try_files $uri =206;
            # NOTE(review): there is no proxy_pass in this location, so the
            # proxy_* directives below appear to have no effect while files
            # are served directly from disk — confirm before relying on them.
            proxy_read_timeout 90;
            proxy_connect_timeout 90;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Proxy "";
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
{%- endif %}
{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %} {%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
server { server {

View File

@@ -7,6 +7,7 @@ logfile=/var/log/yum.log
exactarch=1 exactarch=1
obsoletes=1 obsoletes=1
gpgcheck=1 gpgcheck=1
localpkg_gpgcheck=1
plugins=1 plugins=1
installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }} installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum

View File

@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt: salt:
master: master:
version: 3006.5 version: 3006.6

View File

@@ -1,6 +1,6 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt: salt:
minion: minion:
version: 3006.5 version: 3006.6
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds. service_start_delay: 30 # in seconds.

View File

@@ -9,7 +9,7 @@ soc:
icon: fa-crosshairs icon: fa-crosshairs
target: target:
links: links:
- '/#/hunt?q="{value|escape}" | groupby event.module* event.dataset' - '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: actionAddToCase - name: actionAddToCase
description: actionAddToCaseHelp description: actionAddToCaseHelp
icon: fa-briefcase icon: fa-briefcase
@@ -23,13 +23,13 @@ soc:
icon: fab fa-searchengin icon: fab fa-searchengin
target: '' target: ''
links: links:
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* event.dataset' - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset' - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:log.id.fuid}" | groupby event.module* event.dataset' - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:log.id.uid}" | groupby event.module* event.dataset' - '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:network.community_id}" | groupby event.module* event.dataset' - '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: actionPcap - name: actionPcap
description: actionPcapHelp description: actionPcapHelp
icon: fa-stream icon: fa-stream
@@ -59,12 +59,18 @@ soc:
target: _blank target: _blank
links: links:
- 'https://www.virustotal.com/gui/search/{value}' - 'https://www.virustotal.com/gui/search/{value}'
- name: Sublime Platform Email Review - name: actionSublime
description: Review email in Sublime Platform description: actionSublimeHelp
icon: fa-external-link-alt icon: fa-external-link-alt
target: _blank target: _blank
links: links:
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}' - 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
- name: actionProcessAncestors
description: actionProcessAncestorsHelp
icon: fa-people-roof
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory'
eventFields: eventFields:
default: default:
- soc_timestamp - soc_timestamp
@@ -1411,7 +1417,7 @@ soc:
query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data' query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data'
- name: Host Process Activity - name: Host Process Activity
description: Process activity captured on an endpoint description: Process activity captured on an endpoint
query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable' query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory'
- name: Host File Activity - name: Host File Activity
description: File activity captured on an endpoint description: File activity captured on an endpoint
query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable' query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable'
@@ -1424,8 +1430,11 @@ soc:
- name: Zeek Notice - name: Zeek Notice
description: Zeek notice logs description: Zeek notice logs
query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
- name: Connections - name: Connections and Metadata with community_id
description: Network connection metadata description: Network connections that include community_id
query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: Connections seen by Zeek or Suricata
description: Network connections logged by Zeek or Suricata
query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui'
- name: DCE_RPC - name: DCE_RPC
description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata
@@ -1562,6 +1571,9 @@ soc:
- name: Firewall - name: Firewall
description: Firewall logs description: Firewall logs
query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Firewall Auth
description: Firewall authentication logs
query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message'
- name: VLAN - name: VLAN
description: VLAN (Virtual Local Area Network) tagged logs description: VLAN (Virtual Local Area Network) tagged logs
query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name'

View File

@@ -2,6 +2,10 @@ trusttheca:
file.absent: file.absent:
- name: /etc/pki/tls/certs/intca.crt - name: /etc/pki/tls/certs/intca.crt
# Remove the intca.crt entry under /etc/ssl/certs — presumably the legacy
# symlink, no longer usable since salt 3006.6 disallows symlinks outside
# the file_roots (see soup up_to_2.4.50). Confirm nothing still reads it.
symlinkca:
  file.absent:
    - name: /etc/ssl/certs/intca.crt
influxdb_key: influxdb_key:
file.absent: file.absent:
- name: /etc/pki/influxdb.key - name: /etc/pki/influxdb.key

3
salt/stig/defaults.yaml Normal file
View File

@@ -0,0 +1,3 @@
# Defaults for the STIG hardening feature; override via local pillar.
stig:
  enabled: False  # when True, stig.enabled installs OpenSCAP and runs remediation
  run_interval: 12  # remediation schedule interval — presumably hours; TODO confirm against the schedule state

15
salt/stig/disabled.sls Normal file
View File

@@ -0,0 +1,15 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

# Tear down STIG remediation when the feature is disabled: drop the
# scheduled remediation job and remove the helper script.
stig_remediate_schedule:
  schedule.absent

remove_stig_script:
  file.absent:
    - name: /usr/sbin/so-stig

{% endif %}

104
salt/stig/enabled.sls Normal file
View File

@@ -0,0 +1,104 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states and GLOBALS.os == 'OEL' %}
{% if 'stg' in salt['pillar.get']('features', []) %}
{% set OSCAP_PROFILE_NAME = 'xccdf_org.ssgproject.content_profile_stig' %}
{% set OSCAP_PROFILE_LOCATION = '/opt/so/conf/stig/sos-oscap.xml' %}
{% set OSCAP_OUTPUT_DIR = '/opt/so/log/stig' %}
oscap_packages:
pkg.installed:
- skip_suggestions: True
- pkgs:
- openscap
- openscap-scanner
- scap-security-guide
make_some_dirs:
file.directory:
- name: /opt/so/log/stig
- user: socore
- group: socore
- makedirs: True
make_more_dir:
file.directory:
- name: /opt/so/conf/stig
- user: socore
- group: socore
- makedirs: True
update_stig_profile:
file.managed:
- name: /opt/so/conf/stig/sos-oscap.xml
- source: salt://stig/files/sos-oscap.xml
- user: socore
- group: socore
- mode: 0644
{% if not salt['file.file_exists'](OSCAP_OUTPUT_DIR ~ '/pre-oscap-report.html') %}
run_initial_scan:
module.run:
- name: openscap.xccdf
- params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}'
{% endif %}
run_remediate:
module.run:
- name: openscap.xccdf
- params: 'eval --remediate --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_PROFILE_LOCATION }}'
{# OSCAP rule id: xccdf_org.ssgproject.content_rule_disable_ctrlaltdel_burstaction #}
disable_ctrl_alt_del_action:
file.replace:
- name: /etc/systemd/system.conf
- pattern: '^#CtrlAltDelBurstAction=none'
- repl: 'CtrlAltDelBurstAction=none'
- backup: '.bak'
{# OSCAP rule id: xccdf_org.ssgproject.content_rule_no_empty_passwords #}
remove_nullok_from_password_auth:
file.replace:
- name: /etc/pam.d/password-auth
- pattern: ' nullok'
- repl: ''
- backup: '.bak'
remove_nullok_from_system_auth_auth:
file.replace:
- name: /etc/pam.d/system-auth
- pattern: ' nullok'
- repl: ''
- backup: '.bak'
run_post_scan:
module.run:
- name: openscap.xccdf
- params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/post-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/post-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}'
{% else %}
{{sls}}_no_license_detected:
test.fail_without_changes:
- name: {{sls}}_no_license_detected
- comment:
- "The application of STIGs is a feature supported only for customers with a valid license.
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
for more information about purchasing a license to enable this feature."
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

244945
salt/stig/files/sos-oscap.xml Normal file

File diff suppressed because one or more lines are too long

16
salt/stig/init.sls Normal file
View File

@@ -0,0 +1,16 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'stig/map.jinja' import STIGMERGED %}
include:
{% if STIGMERGED.enabled %}
- stig.schedule
{% if not salt['schedule.is_enabled'](name="stig_remediate_schedule") %}
- stig.enabled
{% endif %}
{% else %}
- stig.disabled
{% endif %}

7
salt/stig/map.jinja Normal file
View File

@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'stig/defaults.yaml' as STIGDEFAULTS with context %}
{% set STIGMERGED = salt['pillar.get']('stig', STIGDEFAULTS.stig, merge=True) %}

24
salt/stig/schedule.sls Normal file
View File

@@ -0,0 +1,24 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'stig/map.jinja' import STIGMERGED %}
{% if 'stg' in salt['pillar.get']('features', []) %}
stig_remediate_schedule:
schedule.present:
- function: state.apply
- job_args:
- stig.enabled
- hours: {{ STIGMERGED.run_interval }}
- maxrunning: 1
- enabled: true
{% else %}
{{sls}}_no_license_detected:
test.fail_without_changes:
- name: {{sls}}_no_license_detected
- comment:
- "The application of STIGs is a feature supported only for customers with a valid license.
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
for more information about purchasing a license to enable this feature."
{% endif %}

11
salt/stig/soc_stig.yaml Normal file
View File

@@ -0,0 +1,11 @@
stig:
enabled:
description: You can enable or disable the application of STIGS using oscap. Note that the actions performed by OSCAP are not automatically reversible.
forcedType: bool
advanced: True
run_interval:
description: The interval in hours between OSCAP remediate executions.
forcedType: int
regex: ^([1-9][0-9]{0,2})$
regexFailureMessage: The value must be an integer between 1 and 999.
advanced: True

View File

@@ -17,9 +17,10 @@ strelka:
mime_db: '/usr/lib/file/magic.mgc' mime_db: '/usr/lib/file/magic.mgc'
yara_rules: '/etc/strelka/taste/' yara_rules: '/etc/strelka/taste/'
scanners: scanners:
'ScanBase64': 'ScanBase64PE':
- positive: - positive:
filename: '^base64_' flavors:
- 'base64_pe'
priority: 5 priority: 5
'ScanBatch': 'ScanBatch':
- positive: - positive:
@@ -27,12 +28,27 @@ strelka:
- 'text/x-msdos-batch' - 'text/x-msdos-batch'
- 'batch_file' - 'batch_file'
priority: 5 priority: 5
'ScanBmpEof':
- positive:
flavors:
- 'image/x-ms-bmp'
- 'bmp_file'
negative:
source:
- 'ScanTranscode'
priority: 5
'ScanBzip2': 'ScanBzip2':
- positive: - positive:
flavors: flavors:
- 'application/x-bzip2' - 'application/x-bzip2'
- 'bzip2_file' - 'bzip2_file'
priority: 5 priority: 5
'ScanDmg':
- positive:
flavors:
- 'dmg_disk_image'
- 'hfsplus_disk_image'
priority: 5
'ScanDocx': 'ScanDocx':
- positive: - positive:
flavors: flavors:
@@ -40,6 +56,11 @@ strelka:
priority: 5 priority: 5
options: options:
extract_text: False extract_text: False
'ScanDonut':
- positive:
flavors:
- 'hacktool_win_shellcode_donut'
priority: 5
'ScanElf': 'ScanElf':
- positive: - positive:
flavors: flavors:
@@ -56,6 +77,26 @@ strelka:
- 'message/rfc822' - 'message/rfc822'
- 'email_file' - 'email_file'
priority: 5 priority: 5
'ScanEncryptedDoc':
- positive:
flavors:
- 'encrypted_word_document'
priority: 5
options:
max_length: 5
scanner_timeout: 150
log_pws: True
password_file: "/etc/strelka/passwords.dat"
'ScanEncryptedZip':
- positive:
flavors:
- 'encrypted_zip'
priority: 5
options:
max_length: 5
scanner_timeout: 150
log_pws: True
password_file: '/etc/strelka/passwords.dat'
'ScanEntropy': 'ScanEntropy':
- positive: - positive:
flavors: flavors:
@@ -111,6 +152,16 @@ strelka:
priority: 5 priority: 5
options: options:
tmp_directory: '/dev/shm/' tmp_directory: '/dev/shm/'
'ScanFooter':
- positive:
flavors:
- '*'
priority: 5
options:
length: 50
encodings:
- classic
- backslash
'ScanGif': 'ScanGif':
- positive: - positive:
flavors: flavors:
@@ -144,13 +195,25 @@ strelka:
- 'html_file' - 'html_file'
priority: 5 priority: 5
options: options:
parser: "html5lib" max_hyperlinks: 50
'ScanIqy':
- positive:
flavors:
- 'iqy_file'
priority: 5
'ScanIni': 'ScanIni':
- positive: - positive:
filename: '(\.([Cc][Ff][Gg]|[Ii][Nn][Ii])|PROJECT)$' filename: '(\.([Cc][Ff][Gg]|[Ii][Nn][Ii])|PROJECT)$'
flavors: flavors:
- 'ini_file' - 'ini_file'
priority: 5 priority: 5
'ScanIso':
- positive:
flavors:
- 'application/x-iso9660-image'
priority: 5
options:
limit: 50
'ScanJarManifest': 'ScanJarManifest':
- positive: - positive:
flavors: flavors:
@@ -198,6 +261,25 @@ strelka:
priority: 5 priority: 5
options: options:
limit: 1000 limit: 1000
'ScanLNK':
- positive:
flavors:
- 'lnk_file'
priority: 5
'ScanLsb':
- positive:
flavors:
- 'image/png'
- 'png_file'
- 'image/jpeg'
- 'jpeg_file'
- 'image/x-ms-bmp'
- 'bmp_file'
- 'image/webp'
negative:
source:
- 'ScanTranscode'
priority: 5
'ScanLzma': 'ScanLzma':
- positive: - positive:
flavors: flavors:
@@ -214,6 +296,36 @@ strelka:
priority: 5 priority: 5
options: options:
tmp_directory: '/dev/shm/' tmp_directory: '/dev/shm/'
'ScanManifest':
- positive:
flavors:
- 'browser_manifest'
priority: 5
'ScanMsi':
- positive:
flavors:
- "image/vnd.fpx"
- "application/vnd.ms-msi"
- "application/x-msi"
priority: 5
options:
tmp_directory: '/dev/shm/'
keys:
- 'Author'
- 'Characters'
- 'Company'
- 'CreateDate'
- 'LastModifiedBy'
- 'Lines'
- 'ModifyDate'
- 'Pages'
- 'Paragraphs'
- 'RevisionNumber'
- 'Software'
- 'Template'
- 'Title'
- 'TotalEditTime'
- 'Words'
'ScanOcr': 'ScanOcr':
- positive: - positive:
flavors: flavors:
@@ -236,6 +348,13 @@ strelka:
- 'application/msword' - 'application/msword'
- 'olecf_file' - 'olecf_file'
priority: 5 priority: 5
'ScanOnenote':
- positive:
flavors:
- 'application/onenote'
- 'application/msonenote'
- 'onenote_file'
priority: 5
'ScanPdf': 'ScanPdf':
- positive: - positive:
flavors: flavors:
@@ -285,6 +404,30 @@ strelka:
- 'ProgramArguments' - 'ProgramArguments'
- 'RunAtLoad' - 'RunAtLoad'
- 'StartInterval' - 'StartInterval'
'ScanPngEof':
- positive:
flavors:
- 'image/png'
- 'png_file'
negative:
source:
- 'ScanTranscode'
priority: 5
'ScanQr':
- positive:
flavors:
- 'image/jpeg'
- 'jpeg_file'
- 'image/png'
- 'png_file'
- 'image/tiff'
- 'type_is_tiff'
- 'image/x-ms-bmp'
- 'bmp_file'
- 'image/webp'
priority: 5
options:
support_inverted: True
'ScanRar': 'ScanRar':
- positive: - positive:
flavors: flavors:
@@ -309,6 +452,19 @@ strelka:
priority: 5 priority: 5
options: options:
limit: 1000 limit: 1000
'ScanSevenZip':
- positive:
flavors:
- 'application/x-7z-compressed'
- '_7zip_file'
- "image/vnd.fpx"
- "application/vnd.ms-msi"
- "application/x-msi"
priority: 5
options:
scanner_timeout: 150
crack_pws: True
log_pws: True
'ScanSwf': 'ScanSwf':
- positive: - positive:
flavors: flavors:
@@ -351,6 +507,7 @@ strelka:
flavors: flavors:
- 'vb_file' - 'vb_file'
- 'vbscript' - 'vbscript'
- 'hta_file'
priority: 5 priority: 5
'ScanVba': 'ScanVba':
- positive: - positive:
@@ -362,6 +519,20 @@ strelka:
priority: 5 priority: 5
options: options:
analyze_macros: True analyze_macros: True
'ScanVhd':
- positive:
flavors:
- 'application/x-vhd'
- 'vhd_file'
- 'vhdx_file'
priority: 5
options:
limit: 100
'ScanVsto':
- positive:
flavors:
- 'vsto_file'
priority: 5
'ScanX509': 'ScanX509':
- positive: - positive:
flavors: flavors:
@@ -391,6 +562,12 @@ strelka:
priority: 5 priority: 5
options: options:
location: '/etc/yara/' location: '/etc/yara/'
compiled:
enabled: False
filename: "rules.compiled"
store_offset: True
offset_meta_key: "StrelkaHexDump"
offset_padding: 32
'ScanZip': 'ScanZip':
- positive: - positive:
flavors: flavors:
@@ -530,6 +707,20 @@ strelka:
ttl: 1h ttl: 1h
response: response:
log: "/var/log/strelka/strelka.log" log: "/var/log/strelka/strelka.log"
broker:
bootstrap: "PLACEHOLDER"
protocol: "PLACEHOLDER"
certlocation: "PLACEHOLDER"
keylocation: "PLACEHOLDER"
calocation: "PLACEHOLDER"
topic: "PLACEHOLDER"
s3redundancy: "PLACEHOLDER - This should be a boolean value"
s3:
accesskey: "PLACEHOLDER"
secretkey: "PLACEHOLDER"
bucketName: "PLACEHOLDER"
region: "PLACEHOLDER"
endpoint: "PLACEHOLDER"
manager: manager:
enabled: False enabled: False
config: config:

View File

@@ -84,10 +84,12 @@ suridatadir:
- mode: 770 - mode: 770
- makedirs: True - makedirs: True
# salt:// would resolve to /opt/so/rules/nids because of the defined file_roots and
# not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt
surirulesync: surirulesync:
file.recurse: file.recurse:
- name: /opt/so/conf/suricata/rules/ - name: /opt/so/conf/suricata/rules/
- source: salt://suricata/rules/ - source: salt://suri/
- user: 940 - user: 940
- group: 940 - group: 940
- show_changes: False - show_changes: False

View File

@@ -13,7 +13,7 @@ ruleslink:
- name: /opt/so/saltstack/local/salt/suricata/rules - name: /opt/so/saltstack/local/salt/suricata/rules
- user: socore - user: socore
- group: socore - group: socore
- target: /opt/so/rules/nids - target: /opt/so/rules/nids/suri
refresh_salt_master_fileserver_suricata_ruleslink: refresh_salt_master_fileserver_suricata_ruleslink:
salt.runner: salt.runner:
@@ -27,4 +27,4 @@ refresh_salt_master_fileserver_suricata_ruleslink:
test.fail_without_changes: test.fail_without_changes:
- name: {{sls}}_state_not_allowed - name: {{sls}}_state_not_allowed
{% endif %} {% endif %}

View File

@@ -36,6 +36,7 @@ telegraf:
- suriloss.sh - suriloss.sh
- zeekcaptureloss.sh - zeekcaptureloss.sh
- zeekloss.sh - zeekloss.sh
- features.sh
manager: manager:
- influxdbsize.sh - influxdbsize.sh
- lasthighstate.sh - lasthighstate.sh
@@ -43,6 +44,7 @@ telegraf:
- raid.sh - raid.sh
- redis.sh - redis.sh
- sostatus.sh - sostatus.sh
- features.sh
managersearch: managersearch:
- eps.sh - eps.sh
- influxdbsize.sh - influxdbsize.sh
@@ -51,6 +53,7 @@ telegraf:
- raid.sh - raid.sh
- redis.sh - redis.sh
- sostatus.sh - sostatus.sh
- features.sh
import: import:
- influxdbsize.sh - influxdbsize.sh
- lasthighstate.sh - lasthighstate.sh
@@ -67,6 +70,7 @@ telegraf:
- suriloss.sh - suriloss.sh
- zeekcaptureloss.sh - zeekcaptureloss.sh
- zeekloss.sh - zeekloss.sh
- features.sh
heavynode: heavynode:
- checkfiles.sh - checkfiles.sh
- eps.sh - eps.sh
@@ -90,6 +94,7 @@ telegraf:
- os.sh - os.sh
- raid.sh - raid.sh
- sostatus.sh - sostatus.sh
- features.sh
receiver: receiver:
- eps.sh - eps.sh
- lasthighstate.sh - lasthighstate.sh

View File

@@ -0,0 +1,17 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
FPS_ENABLED=$(cat /var/log/sostatus/fps_enabled)
LKS_ENABLED=$(cat /var/log/sostatus/lks_enabled)
echo "features fps=$FPS_ENABLED"
echo "features lks=$LKS_ENABLED"
fi
exit 0

View File

@@ -1,5 +1,5 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
@@ -46,6 +46,7 @@ base:
- zeek - zeek
- strelka - strelka
- elasticfleet.install_agent_grid - elasticfleet.install_agent_grid
- stig
'*_eval and G@saltversion:{{saltversion}}': '*_eval and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -110,6 +111,7 @@ base:
- soctopus - soctopus
- playbook - playbook
- elasticfleet - elasticfleet
- stig
'*_standalone and G@saltversion:{{saltversion}}': '*_standalone and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -128,7 +130,7 @@ base:
- sensoroni - sensoroni
- telegraf - telegraf
- idstools - idstools
- suricata.manager - suricata.manager
- healthcheck - healthcheck
- mysql - mysql
- elasticsearch - elasticsearch
@@ -146,6 +148,7 @@ base:
- soctopus - soctopus
- playbook - playbook
- elasticfleet - elasticfleet
- stig
'*_searchnode and G@saltversion:{{saltversion}}': '*_searchnode and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -157,6 +160,7 @@ base:
- elasticsearch - elasticsearch
- logstash - logstash
- elasticfleet.install_agent_grid - elasticfleet.install_agent_grid
- stig
'*_managersearch and G@saltversion:{{saltversion}}': '*_managersearch and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -187,6 +191,7 @@ base:
- soctopus - soctopus
- playbook - playbook
- elasticfleet - elasticfleet
- stig
'*_heavynode and G@saltversion:{{saltversion}}': '*_heavynode and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -206,7 +211,7 @@ base:
- zeek - zeek
- elasticfleet.install_agent_grid - elasticfleet.install_agent_grid
- elasticagent - elasticagent
'*_import and G@saltversion:{{saltversion}}': '*_import and G@saltversion:{{saltversion}}':
- match: compound - match: compound
- salt.master - salt.master
@@ -259,6 +264,7 @@ base:
- telegraf - telegraf
- firewall - firewall
- logstash - logstash
- nginx
- elasticfleet - elasticfleet
- elasticfleet.install_agent_grid - elasticfleet.install_agent_grid
- schedule - schedule

View File

@@ -1413,7 +1413,7 @@ make_some_dirs() {
mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/portgroups
mkdir -p $local_salt_dir/salt/firewall/ports mkdir -p $local_salt_dir/salt/firewall/ports
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert global;do for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
mkdir -p $local_salt_dir/pillar/$THEDIR mkdir -p $local_salt_dir/pillar/$THEDIR
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
@@ -1600,6 +1600,9 @@ reinstall_init() {
salt-call -l info saltutil.kill_all_jobs --local salt-call -l info saltutil.kill_all_jobs --local
fi fi
logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt"
logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt"
# Kill any salt processes (safely) # Kill any salt processes (safely)
for service in "${salt_services[@]}"; do for service in "${salt_services[@]}"; do
# Stop the service in the background so we can exit after a certain amount of time # Stop the service in the background so we can exit after a certain amount of time
@@ -1621,9 +1624,6 @@ reinstall_init() {
done done
done done
logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt"
logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt"
# Remove all salt configs # Remove all salt configs
rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/ rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/
@@ -1933,7 +1933,11 @@ saltify() {
logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION" logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION"
else else
# We just need the minion # We just need the minion
logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION" if [[ $is_airgap ]]; then
logCmd "dnf -y install salt salt-minion"
else
logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION"
fi
fi fi
fi fi
@@ -2148,11 +2152,12 @@ set_default_log_size() {
esac esac
local disk_dir="/" local disk_dir="/"
if [ -d /nsm ]; then if mountpoint -q /nsm; then
disk_dir="/nsm" disk_dir="/nsm"
fi fi
if [ -d /nsm/elasticsearch ]; then if mountpoint -q /nsm/elasticsearch; then
disk_dir="/nsm/elasticsearch" disk_dir="/nsm/elasticsearch"
percentage=80
fi fi
local disk_size_1k local disk_size_1k

Binary file not shown.