Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2025-12-21 08:23:08 +01:00
Merge remote-tracking branch 'origin/2.4/dev' into kilo
@@ -41,7 +41,8 @@ file_roots:
  base:
    - /opt/so/saltstack/local/salt
    - /opt/so/saltstack/default/salt

    - /nsm/elastic-fleet/artifacts
    - /opt/so/rules/nids

# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
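
For orientation, soup appends both of these new roots to the salt-master config later in this commit using the new so-yaml.py append subcommand; a minimal sketch of those calls (paths taken from the soup changes further below):

# Append the new NIDS rules and Elastic Fleet artifact roots to file_roots.base (as done in up_to_2.4.50 below)
so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts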

@@ -180,6 +180,7 @@
  'telegraf',
  'firewall',
  'logstash',
  'nginx',
  'healthcheck',
  'schedule',
  'elasticfleet',

@@ -4,7 +4,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
  - common.soup_scripts
  - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - manager.elasticsearch # needed for elastic_curl_config state
@@ -134,6 +133,18 @@ common_sbin_jinja:
    - file_mode: 755
    - template: jinja

{% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
# these two states remove the scripts from non manager nodes
remove_soup:
  file.absent:
    - name: /usr/sbin/soup

remove_so-firewall:
  file.absent:
    - name: /usr/sbin/so-firewall
{% endif %}

so-status_script:
  file.managed:
    - name: /usr/sbin/so-status
@@ -1,23 +1,70 @@
# Sync some Utilities
soup_scripts:
  file.recurse:
    - name: /usr/sbin
    - user: root
    - group: root
    - file_mode: 755
    - source: salt://common/tools/sbin
    - include_pat:
      - so-common
      - so-image-common
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}

soup_manager_scripts:
  file.recurse:
    - name: /usr/sbin
    - user: root
    - group: root
    - file_mode: 755
    - source: salt://manager/tools/sbin
    - include_pat:
      - so-firewall
      - so-repo-sync
      - soup
remove_common_soup:
  file.absent:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup

remove_common_so-firewall:
  file.absent:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall

copy_so-common_common_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
    - force: True
    - preserve: True

copy_so-image-common_common_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
    - force: True
    - preserve: True

copy_soup_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
    - force: True
    - preserve: True

copy_so-firewall_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
    - force: True
    - preserve: True

copy_so-common_sbin:
  file.copy:
    - name: /usr/sbin/so-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
    - force: True
    - preserve: True

copy_so-image-common_sbin:
  file.copy:
    - name: /usr/sbin/so-image-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
    - force: True
    - preserve: True

copy_soup_sbin:
  file.copy:
    - name: /usr/sbin/soup
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
    - force: True
    - preserve: True

copy_so-firewall_sbin:
  file.copy:
    - name: /usr/sbin/so-firewall
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
    - force: True
    - preserve: True
@@ -334,6 +334,7 @@ desktop_packages:
    - pulseaudio-libs
    - pulseaudio-libs-glib2
    - pulseaudio-utils
    - putty
    - sane-airscan
    - sane-backends
    - sane-backends-drivers-cameras

@@ -84,6 +84,13 @@ docker:
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-nginx-fleet-node':
      final_octet: 31
      port_bindings:
        - 8443:8443
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-playbook':
      final_octet: 32
      port_bindings:

@@ -48,6 +48,7 @@ docker:
    so-logstash: *dockerOptions
    so-mysql: *dockerOptions
    so-nginx: *dockerOptions
    so-nginx-fleet-node: *dockerOptions
    so-playbook: *dockerOptions
    so-redis: *dockerOptions
    so-sensoroni: *dockerOptions
@@ -17,6 +17,11 @@ include:
  - elasticfleet.sostatus
  - ssl

# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
  cmd.run:
    - name: so-elasticsearch-wait

# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
@@ -33,12 +38,26 @@ so-elastic-fleet-auto-configure-server-urls:
    - retry: True
{% endif %}

# Automatically update Fleet Server Elasticsearch URLs
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
{% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-es-url-update
    - retry: True

so-elastic-fleet-auto-configure-artifact-urls:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
    - retry: True

{% endif %}

# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
  file.recurse:
    - name: /nsm/elastic-fleet/artifacts/beats
    - source: salt://beats
{% endif %}

{% if SERVICETOKEN != '' %}

@@ -1,3 +1,5 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
@@ -0,0 +1,90 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}

. /usr/sbin/so-common

# Only run on Managers
if ! is_manager_node; then
    printf "Not a Manager Node... Exiting"
    exit 0
fi

# Function to check if an array contains a value
array_contains () {
    local array="$1[@]"
    local seeking=$2
    local in=1
    for element in "${!array}"; do
        if [[ $element == "$seeking" ]]; then
            in=0
            break
        fi
    done
    return $in
}

# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'

# Initialize an array for new hosts from Fleet Nodes
declare -a NEW_LIST=()

# Query for Fleet Nodes & add them to the list (Hostname)
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
    readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
    for NODE in "${FLEETNODES[@]}"; do
        URL="http://$NODE:8443/artifacts/"
        NAME="FleetServer_$NODE"
        NEW_LIST+=("$URL=$NAME")
    done
fi

# Create an array for expected hosts and their names
declare -A expected_urls=(
    ["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
    ["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
)

# Merge NEW_LIST into expected_urls
for entry in "${NEW_LIST[@]}"; do
    # Extract URL and Name from each entry
    IFS='=' read -r URL NAME <<< "$entry"
    # Add to expected_urls, automatically handling URL as key and NAME as value
    expected_urls["$URL"]="$NAME"
done

# Fetch the current hosts from the API
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)

# Convert current hosts to an array
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"

# Flag to track if any host was added
any_url_added=0

# Check each expected host
for host in "${!expected_urls[@]}"; do
    array_contains current_urls_array "$host" || {
        echo "$host (${expected_urls[$host]}) is missing. Adding it..."

        # Prepare the JSON payload
        JSON_STRING=$( jq -n \
            --arg NAME "${expected_urls[$host]}" \
            --arg URL "$host" \
            '{"name":$NAME,"host":$URL}' )

        # Create the missing host
        curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"

        # Flag that an artifact URL was added
        any_url_added=1
    }
done

if [[ $any_url_added -eq 0 ]]; then
    echo "All expected artifact URLs are present. No updates needed."
fi
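
For a manual spot check on a manager, the same Fleet API that this script reads can be listed directly; a minimal sketch using the curl config referenced above:

# List the agent download sources currently registered in Fleet (same endpoint the script queries)
curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r '.items[] | "\(.name) \(.host)"'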

@@ -1,3 +1,5 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

@@ -1,3 +1,5 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

@@ -1,3 +1,5 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

@@ -118,6 +118,19 @@ esingestconf:
    - user: 930
    - group: 939

# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
  file.managed:
    - name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
    - source: salt://elasticsearch/base-template.json.jinja
    - defaults:
        TEMPLATE_CONFIG: {{ config }}
    - template: jinja
    - onchanges_in:
      - file: so-pipelines-reload
{% endfor %}
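
After a highstate, each pipeline defined under the pillar renders to its own file in the ingest directory named above; a quick, hedged way to confirm this on the manager (custom001 is one of the default pillar keys shown below):

# Inspect the rendered ingest pipeline definitions generated from the pillar
ls /opt/so/conf/elasticsearch/ingest/
cat /opt/so/conf/elasticsearch/ingest/custom001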

eslog4jfile:
  file.managed:
    - name: /opt/so/conf/elasticsearch/log4j2.properties

@@ -55,6 +55,87 @@ elasticsearch:
      key: /usr/share/elasticsearch/config/elasticsearch.key
      verification_mode: none
      enabled: false
  pipelines:
    custom001:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom001
        - pipeline:
            name: common
    custom002:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom002
        - pipeline:
            name: common
    custom003:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom003
        - pipeline:
            name: common
    custom004:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom004
        - pipeline:
            name: common
    custom005:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom005
        - pipeline:
            name: common
    custom006:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom006
        - pipeline:
            name: common
    custom007:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom007
        - pipeline:
            name: common
    custom008:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom008
        - pipeline:
            name: common
    custom009:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom009
        - pipeline:
            name: common
    custom010:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom010
        - pipeline:
            name: common
  index_settings:
    global_overrides:
      index_template:
@@ -45,6 +45,28 @@ elasticsearch:
      description: Max number of boolean clauses per query.
      global: True
      helpLink: elasticsearch.html
    pipelines:
      custom001: &pipelines
        description:
          description: Description of the ingest node pipeline
          global: True
          advanced: True
          helpLink: elasticsearch.html
        processors:
          description: Processors for the ingest node pipeline
          global: True
          advanced: True
          multiline: True
          helpLink: elasticsearch.html
      custom002: *pipelines
      custom003: *pipelines
      custom004: *pipelines
      custom005: *pipelines
      custom006: *pipelines
      custom007: *pipelines
      custom008: *pipelines
      custom009: *pipelines
      custom010: *pipelines
    index_settings:
      global_overrides:
        index_template:

@@ -95,6 +95,7 @@
{% set NODE_CONTAINERS = [
  'so-elastic-fleet',
  'so-logstash',
  'so-nginx-fleet-node'
] %}

{% elif GLOBALS.role == 'so-sensor' %}

@@ -39,7 +39,7 @@ so-idstools:
{% endif %}
    - binds:
      - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
      - /opt/so/rules/nids:/opt/so/rules/nids:rw
      - /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw
      - /nsm/rules/:/nsm/rules/:rw
{% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}
@@ -1,10 +1,10 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
--merged=/opt/so/rules/nids/suri/all.rules
--local=/opt/so/rules/nids/suri/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/extraction.rules
--local=/opt/so/rules/nids/filters.rules
--local=/opt/so/rules/nids/suri/extraction.rules
--local=/opt/so/rules/nids/suri/filters.rules
{%- endif %}
--url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
--disable=/opt/so/idstools/etc/disable.conf

@@ -21,7 +21,7 @@ idstoolsetcsync:

rulesdir:
  file.directory:
    - name: /opt/so/rules/nids
    - name: /opt/so/rules/nids/suri
    - user: 939
    - group: 939
    - makedirs: True
@@ -29,7 +29,7 @@ rulesdir:
# Don't show changes because all.rules can be large
synclocalnidsrules:
  file.recurse:
    - name: /opt/so/rules/nids/
    - name: /opt/so/rules/nids/suri/
    - source: salt://idstools/rules/
    - user: 939
    - group: 939

@@ -63,6 +63,20 @@ lspipelinedir:
    - user: 931
    - group: 939

# Auto-generate Logstash pipeline config
{% for pipeline, config in LOGSTASH_MERGED.pipeline_config.items() %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% set custom_pipeline = 'custom/' + pipeline + '.conf' %}
{% if custom_pipeline in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_custom_pipeline_conf_{{assigned_pipeline}}_{{pipeline}}:
  file.managed:
    - name: /opt/so/conf/logstash/pipelines/{{assigned_pipeline}}/{{ pipeline }}.conf
    - contents: LOGSTASH_MERGED.pipeline_config.{{pipeline}}
{% endif %}
{% endfor %}
{% endfor %}

{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:
@@ -42,6 +42,24 @@ logstash:
    custom2: []
    custom3: []
    custom4: []
  pipeline_config:
    custom001: |-
      filter {
        if [event][module] =~ "zeek" {
          mutate {
            add_tag => ["network_stuff"]
          }
        }
      }
    custom002: PLACEHOLDER
    custom003: PLACEHOLDER
    custom004: PLACEHOLDER
    custom005: PLACEHOLDER
    custom006: PLACEHOLDER
    custom007: PLACEHOLDER
    custom008: PLACEHOLDER
    custom009: PLACEHOLDER
    custom010: PLACEHOLDER
  settings:
    lsheap: 500m
    config:

@@ -31,6 +31,22 @@ logstash:
    custom2: *defined_pipelines
    custom3: *defined_pipelines
    custom4: *defined_pipelines
  pipeline_config:
    custom001: &pipeline_config
      description: Pipeline configuration for Logstash
      advanced: True
      multiline: True
      forcedType: string
      helpLink: logstash.html
    custom002: *pipeline_config
    custom003: *pipeline_config
    custom004: *pipeline_config
    custom005: *pipeline_config
    custom006: *pipeline_config
    custom007: *pipeline_config
    custom008: *pipeline_config
    custom009: *pipeline_config
    custom010: *pipeline_config
  settings:
    lsheap:
      description: Heap size to use for logstash
@@ -16,12 +16,14 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
    print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
    print(' General commands:')
    print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
    print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
    print(' help - Prints this usage information.')
    print('')
    print(' Where:')
    print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
    print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
    print(' LISTITEM - Item to add to the list.')
    sys.exit(1)

@@ -35,6 +37,35 @@ def writeYaml(filename, content):
    file = open(filename, "w")
    return yaml.dump(content, file)

def appendItem(content, key, listItem):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        appendItem(content[pieces[0]], pieces[1], listItem)
    else:
        try:
            content[key].append(listItem)
        except AttributeError:
            print("The existing value for the given key is not a list. No action was taken on the file.")
            return 1
        except KeyError:
            print("The key provided does not exist. No action was taken on the file.")
            return 1

def append(args):
    if len(args) != 3:
        print('Missing filename, key arg, or list item to append', file=sys.stderr)
        showUsage(None)
        return

    filename = args[0]
    key = args[1]
    listItem = args[2]

    content = loadYaml(filename)
    appendItem(content, key, listItem)
    writeYaml(filename, content)

    return 0
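
To illustrate the new subcommand, this is how soup invokes it during the 2.4.50 upgrade step later in this commit (a usage sketch, not additional functionality):

# Append a list item to an existing list-valued key, e.g. add a new salt file_roots entry
so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts
# If the key is missing or not a list, an error is printed and the list is left unchanged (see appendItem above)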

def removeKey(content, key):
    pieces = key.split(".", 1)
@@ -69,6 +100,7 @@ def main():

    commands = {
        "help": showUsage,
        "append": append,
        "remove": remove,
    }
@@ -105,3 +105,99 @@ class TestRemove(unittest.TestCase):
        self.assertEqual(actual, expected)
        sysmock.assert_called_once_with(1)
        self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")

    def test_append(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
        file.close()

        soyaml.append([filename, "key3", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()
        expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\n- d\n"
        self.assertEqual(actual, expected)

    def test_append_nested(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.append([filename, "key1.child2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_append_nested_deep(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.append([filename, "key1.child2.deep2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_append_key_noexist(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key4", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")

    def test_append_key_noexist_deep(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")

    def test_append_key_nonlist(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")

    def test_append_key_nonlist_deep(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
@@ -594,6 +594,26 @@ up_to_2.4.50() {
    touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
    touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls

    # the file_roots need to be updated due to salt 3006.6 upgrade not allowing symlinks outside the file_roots
    # put new so-yaml in place
    echo "Updating so-yaml"
    \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
    \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
    echo "Creating a backup of the salt-master config."
    # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to have the version
    # so was at prior to starting upgrade. use POSTVERSION here since it doesn't change until
    # post upgrade changes. POSTVERSION set to INSTALLEDVERSION at start of soup
    cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
    echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
    so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
    echo "Moving Suricata rules"
    mkdir /opt/so/rules/nids/suri
    chown socore:socore /opt/so/rules/nids/suri
    mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.

    echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
    so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts

    INSTALLEDVERSION=2.4.50
}
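
After this step runs, the two new roots should be visible in the salt-master config; a minimal verification sketch (the grep pattern assumes file_roots sits at the top level of /etc/salt/master, as in the hunk at the top of this commit):

# Confirm /opt/so/rules/nids and /nsm/elastic-fleet/artifacts were appended under file_roots.base
grep -A 6 '^file_roots:' /etc/salt/master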

@@ -774,21 +794,18 @@ verify_latest_update_script() {
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
    cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/manager/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/manager/tools/sbin/

    salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local

    # Verify that soup scripts updated as expected
    get_soup_script_hashes
    if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
      echo "Successfully updated soup scripts."
    else
      # When STIGs are enabled soup scripts will fail to update using --file-root --local.
      # After checking that the expected hashes are not present, retry updating soup scripts using salt master.
      echo "There was a problem updating soup scripts.. Trying to rerun script update"
      salt-call state.apply common.soup_scripts queue=True -linfo
      echo "There was a problem updating soup scripts. Trying to rerun script update."
      salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    fi

    echo ""
    echo "The soup script has been modified. Please run soup again to continue the upgrade."
    exit 0
@@ -937,9 +954,6 @@ main() {

  systemctl_func "stop" "$cron_service_name"

  # update mine items prior to stopping salt-minion and salt-master
  update_salt_mine

  echo "Updating dockers to $NEWVERSION."
  if [[ $is_airgap -eq 0 ]]; then
    airgap_update_dockers
@@ -1015,6 +1029,9 @@ main() {
  salt-call state.apply salt.minion -l info queue=True
  echo ""

  # ensure the mine is updated and populated before highstates run, following the salt-master restart
  update_salt_mine

  enable_highstate

  echo ""
@@ -14,6 +14,9 @@ include:
  - nginx.config
  - nginx.sostatus

{% if grains.role not in ['so-fleet'] %}

{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}

@@ -88,6 +91,15 @@ make-rule-dir-nginx:
    - recurse:
      - user
      - group

{% endif %}

{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
{% if GLOBALS.role == 'so-fleet' %}
{% set container_config = 'so-nginx-fleet-node' %}
{% else %}
{% set container_config = 'so-nginx' %}
{% endif %}

so-nginx:
  docker_container.running:
@@ -95,11 +107,11 @@ so-nginx:
    - hostname: so-nginx
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
        - ipv4_address: {{ DOCKER.containers[container_config].ip }}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-nginx'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %}
{% if DOCKER.containers[container_config].extra_hosts %}
{% for XTRAHOST in DOCKER.containers[container_config].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
@@ -119,20 +131,20 @@ so-nginx:
      - /nsm/repo:/opt/socore/html/repo:ro
      - /nsm/rules:/nsm/rules:ro
{% endif %}
{% if DOCKER.containers['so-nginx'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %}
{% if DOCKER.containers[container_config].custom_bind_mounts %}
{% for BIND in DOCKER.containers[container_config].custom_bind_mounts %}
      - {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-nginx'].extra_env %}
{% if DOCKER.containers[container_config].extra_env %}
    - environment:
{% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %}
{% for XTRAENV in DOCKER.containers[container_config].extra_env %}
      - {{ XTRAENV }}
{% endfor %}
{% endif %}
    - cap_add: NET_BIND_SERVICE
    - port_bindings:
{% for BINDING in DOCKER.containers['so-nginx'].port_bindings %}
{% for BINDING in DOCKER.containers[container_config].port_bindings %}
      - {{ BINDING }}
{% endfor %}
    - watch:
@@ -39,6 +39,26 @@ http {

    include /etc/nginx/conf.d/*.conf;

{%- if role in ['fleet'] %}

    server {
      listen 8443;
      server_name {{ GLOBALS.hostname }};
      root /opt/socore/html;
      location /artifacts/ {
        try_files $uri =206;
        proxy_read_timeout 90;
        proxy_connect_timeout 90;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Proxy "";
        proxy_set_header X-Forwarded-Proto $scheme;
      }
    }

{%- endif %}
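
A quick way to check that a Fleet node is serving artifacts on the new listener (a hedged sketch; fleet-node.example stands in for the actual Fleet node hostname used in the FleetServer_<hostname> entries earlier in this commit):

# The Fleet node should answer on 8443; the exact status depends on the artifact path requested
curl -sI http://fleet-node.example:8443/artifacts/ | head -n 1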

{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}

    server {

@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  master:
    version: 3006.5
    version: 3006.6

@@ -1,6 +1,6 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  minion:
    version: 3006.5
    version: 3006.6
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.
@@ -9,7 +9,7 @@ soc:
icon: fa-crosshairs
target:
links:
- '/#/hunt?q="{value|escape}" | groupby event.module* event.dataset'
- '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: actionAddToCase
description: actionAddToCaseHelp
icon: fa-briefcase
@@ -23,13 +23,13 @@ soc:
icon: fab fa-searchengin
target: ''
links:
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* event.dataset'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
- '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
- '/#/hunt?q="{:log.id.fuid}" | groupby event.module* event.dataset'
- '/#/hunt?q="{:log.id.uid}" | groupby event.module* event.dataset'
- '/#/hunt?q="{:network.community_id}" | groupby event.module* event.dataset'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: actionPcap
description: actionPcapHelp
icon: fa-stream
@@ -59,12 +59,18 @@ soc:
target: _blank
links:
- 'https://www.virustotal.com/gui/search/{value}'
- name: Sublime Platform Email Review
description: Review email in Sublime Platform
- name: actionSublime
description: actionSublimeHelp
icon: fa-external-link-alt
target: _blank
links:
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
- name: actionProcessAncestors
description: actionProcessAncestorsHelp
icon: fa-people-roof
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.module event.dataset | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory'
eventFields:
default:
- soc_timestamp
@@ -1425,7 +1431,7 @@ soc:
query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby -sankey host.name dns.question.name | groupby event.dataset event.type | groupby host.name | groupby process.executable | groupby dns.question.name | groupby dns.answers.data'
- name: Host Process Activity
description: Process activity captured on an endpoint
query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable'
query: 'event.category:process | groupby -sankey host.name user.name* | groupby event.dataset event.action | groupby host.name | groupby user.name | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp event.dataset host.name user.name process.parent.name process.name process.working_directory'
- name: Host File Activity
description: File activity captured on an endpoint
query: 'event.category: file AND _exists_:process.executable | groupby -sankey host.name process.executable | groupby host.name | groupby event.dataset event.action event.type | groupby file.name | groupby process.executable'
@@ -1438,8 +1444,11 @@ soc:
- name: Zeek Notice
description: Zeek notice logs
query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
- name: Connections
description: Network connection metadata
- name: Connections and Metadata with community_id
description: Network connections that include community_id
query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: Connections seen by Zeek or Suricata
description: Network connections logged by Zeek or Suricata
query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui'
- name: DCE_RPC
description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata
@@ -1576,6 +1585,9 @@ soc:
- name: Firewall
description: Firewall logs
query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Firewall Auth
description: Firewall authentication logs
query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message'
- name: VLAN
description: VLAN (Virtual Local Area Network) tagged logs
query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name'

@@ -2,6 +2,10 @@ trusttheca:
  file.absent:
    - name: /etc/pki/tls/certs/intca.crt

symlinkca:
  file.absent:
    - name: /etc/ssl/certs/intca.crt

influxdb_key:
  file.absent:
    - name: /etc/pki/influxdb.key

@@ -84,10 +84,12 @@ suridatadir:
    - mode: 770
    - makedirs: True

# salt:// would resolve to /opt/so/rules/nids because of the defined file_roots and
# not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt
surirulesync:
  file.recurse:
    - name: /opt/so/conf/suricata/rules/
    - source: salt://suricata/rules/
    - source: salt://suri/
    - user: 940
    - group: 940
    - show_changes: False

@@ -13,7 +13,7 @@ ruleslink:
    - name: /opt/so/saltstack/local/salt/suricata/rules
    - user: socore
    - group: socore
    - target: /opt/so/rules/nids
    - target: /opt/so/rules/nids/suri

refresh_salt_master_fileserver_suricata_ruleslink:
  salt.runner:
@@ -27,4 +27,4 @@ refresh_salt_master_fileserver_suricata_ruleslink:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
@@ -264,6 +264,7 @@ base:
    - telegraf
    - firewall
    - logstash
    - nginx
    - elasticfleet
    - elasticfleet.install_agent_grid
    - schedule

@@ -1600,6 +1600,9 @@ reinstall_init() {
    salt-call -l info saltutil.kill_all_jobs --local
  fi

  logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt"
  logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt"

  # Kill any salt processes (safely)
  for service in "${salt_services[@]}"; do
    # Stop the service in the background so we can exit after a certain amount of time
@@ -1621,9 +1624,6 @@ reinstall_init() {
    done
  done

  logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt"
  logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt"

  # Remove all salt configs
  rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/

@@ -2152,11 +2152,12 @@ set_default_log_size() {
  esac

  local disk_dir="/"
  if [ -d /nsm ]; then
    if mountpoint -q /nsm; then
      disk_dir="/nsm"
    fi
    if [ -d /nsm/elasticsearch ]; then
      if mountpoint -q /nsm/elasticsearch; then
        disk_dir="/nsm/elasticsearch"
        percentage=80
      fi

  local disk_size_1k