Merge branch '2.4/dev' into kilo

This commit is contained in:
Jason Ertel
2023-11-06 11:27:58 -05:00
39 changed files with 11743 additions and 5844 deletions

View File

@@ -12,7 +12,6 @@ role:
eval:
fleet:
heavynode:
helixsensor:
idh:
import:
manager:

View File

@@ -1,44 +0,0 @@
thresholding:
sids:
8675309:
- threshold:
gen_id: 1
type: threshold
track: by_src
count: 10
seconds: 10
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 100
seconds: 30
- rate_filter:
gen_id: 1
track: by_rule
count: 50
seconds: 30
new_action: alert
timeout: 30
- suppress:
gen_id: 1
track: by_either
ip: 10.10.3.7
11223344:
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 10
seconds: 10
- rate_filter:
gen_id: 1
track: by_src
count: 50
seconds: 20
new_action: pass
timeout: 60
- suppress:
gen_id: 1
track: by_src
ip: 10.10.3.0/24

View File

@@ -1,20 +0,0 @@
thresholding:
sids:
<signature id>:
- threshold:
gen_id: <generator id>
type: <threshold | limit | both>
track: <by_src | by_dst>
count: <count>
seconds: <seconds>
- rate_filter:
gen_id: <generator id>
track: <by_src | by_dst | by_rule | by_both>
count: <count>
seconds: <seconds>
new_action: <alert | pass>
timeout: <seconds>
- suppress:
gen_id: <generator id>
track: <by_src | by_dst | by_either>
ip: <ip | subnet>

View File

@@ -8,6 +8,7 @@ include:
- common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state
- manager.kibana
{% endif %}
net.core.wmem_default:

View File

@@ -152,15 +152,18 @@ check_salt_master_status() {
return 0
}
# this is only intended to be used to check the status of the minion from a salt master
check_salt_minion_status() {
local timeout="${1:-5}"
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-/dev/stdout}"
echo "Checking if the salt minion: $minion will respond to jobs" >> "$logfile" 2>&1
salt "$minion" test.ping -t $timeout > /dev/null 2>&1
local status=$?
if [ $status -gt 0 ]; then
echo " Minion did not respond" >> "$setup_log" 2>&1
echo " Minion did not respond" >> "$logfile" 2>&1
else
echo " Received job response from salt minion" >> "$setup_log" 2>&1
echo " Received job response from salt minion" >> "$logfile" 2>&1
fi
return $status
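The refactor turns the previously hard-coded $MINION_ID/$setup_log pairing into parameters. A minimal usage sketch of the new signature (the minion id and log path below are illustrative, not from this diff):

# timeout and logfile are optional; they default to 5 seconds and /dev/stdout
check_salt_minion_status "so-sensor01_sensor" 5 "$setup_log"
check_salt_minion_status "$MINION_ID"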
@@ -440,6 +443,24 @@ run_check_net_err() {
fi
}
wait_for_salt_minion() {
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-/dev/stdout}"
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local attempt=0
# each attempt takes about 15 seconds
local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
return 1
fi
sleep 10
done
return 0
}
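soup, later in this diff, wraps this helper with a fail handler; a standalone sketch of the same pattern:

# Waits for the minion's ready message in journald, then polls
# check_salt_minion_status up to 20 times (~15s per attempt, per the comment above).
wait_for_salt_minion "$MINION_ID" 5 /dev/stdout || fail "Salt minion was not running or ready."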
salt_minion_count() {
local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l)
@@ -452,19 +473,51 @@ set_os() {
OS=rocky
OSVER=9
is_rocky=true
is_rpm=true
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
OS=centos
OSVER=9
is_centos=true
elif grep -q "Oracle Linux Server release 9" /etc/system-release; then
OS=oel
is_rpm=true
elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
OS=alma
OSVER=9
is_oracle=true
is_alma=true
is_rpm=true
elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
if [ -f /etc/oracle-release ]; then
OS=oracle
OSVER=9
is_oracle=true
is_rpm=true
else
OS=rhel
OSVER=9
is_rhel=true
is_rpm=true
fi
fi
cron_service_name="crond"
else
OS=ubuntu
is_ubuntu=true
elif [ -f /etc/os-release ]; then
if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
UBVER=20.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
OSVER=jammy
UBVER=22.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
OSVER=bookworm
DEBVER=12
is_debian=true
OS=debian
is_deb=true
fi
cron_service_name="cron"
fi
}
@@ -498,6 +551,10 @@ set_version() {
fi
}
status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}
systemctl_func() {
local action=$1
local echo_action=$1

View File

@@ -114,6 +114,11 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving container" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -137,6 +142,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -158,6 +164,9 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"

View File

@@ -1,67 +0,0 @@
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local
zeek_logs_enabled() {
echo "zeeklogs:" > $local_salt_dir/pillar/zeeklogs.sls
echo " enabled:" >> $local_salt_dir/pillar/zeeklogs.sls
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/zeeklogs.sls
done
}
whiptail_manager_adv_service_zeeklogs() {
BLOGS=$(whiptail --title "so-zeek-logs" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
"dce_rpc" "RPC Logs" ON \
"dhcp" "DHCP Logs" ON \
"dnp3" "DNP3 Logs" ON \
"dns" "DNS Logs" ON \
"dpd" "DPD Logs" ON \
"files" "Files Logs" ON \
"ftp" "FTP Logs" ON \
"http" "HTTP Logs" ON \
"intel" "Intel Hits Logs" ON \
"irc" "IRC Chat Logs" ON \
"kerberos" "Kerberos Logs" ON \
"modbus" "MODBUS Logs" ON \
"notice" "Zeek Notice Logs" ON \
"ntlm" "NTLM Logs" ON \
"pe" "PE Logs" ON \
"radius" "Radius Logs" ON \
"rfb" "RFB Logs" ON \
"rdp" "RDP Logs" ON \
"sip" "SIP Logs" ON \
"smb_files" "SMB Files Logs" ON \
"smb_mapping" "SMB Mapping Logs" ON \
"smtp" "SMTP Logs" ON \
"snmp" "SNMP Logs" ON \
"ssh" "SSH Logs" ON \
"ssl" "SSL Logs" ON \
"syslog" "Syslog Logs" ON \
"tunnel" "Tunnel Logs" ON \
"weird" "Zeek Weird Logs" ON \
"mysql" "MySQL Logs" ON \
"socks" "SOCKS Logs" ON \
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
local exitstatus=$?
IFS=' ' read -ra BLOGS <<< "$BLOGS"
return $exitstatus
}
whiptail_manager_adv_service_zeeklogs
return_code=$?
case $return_code in
1)
whiptail --title "so-zeek-logs" --msgbox "Cancelling. No changes have been made." 8 75
;;
255)
whiptail --title "so-zeek-logs" --msgbox "Whiptail error occured, exiting." 8 75
;;
*)
zeek_logs_enabled
;;
esac

View File

@@ -346,7 +346,6 @@ desktop_packages:
- snappy
- sound-theme-freedesktop
- soundtouch
- securityonion-networkminer
- speech-dispatcher
- speech-dispatcher-espeak-ng
- speex
@@ -433,6 +432,10 @@ desktop_packages:
- xorg-x11-xinit-session
- zip
install_networkminer:
pkg.latest:
- name: securityonion-networkminer
{% else %}
desktop_packages_os_fail:

View File

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if sls.split('.')[0] in allowed_states %}
{% set node_data = salt['pillar.get']('node_data') %}
# Add EA Group
elasticfleetgroup:
@@ -67,6 +68,7 @@ eapackageupgrade:
- source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade
- user: 947
- group: 939
- mode: 755
- template: jinja
{% if GLOBALS.role != "so-fleet" %}
@@ -92,13 +94,53 @@ eaintegration:
- user: 947
- group: 939
eaoptionalintegrationsdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional
- user: 947
- group: 939
- makedirs: True
{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% set optional_integrations = salt['pillar.get']('elasticfleet:optional_integrations', {}) %}
{% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %}
fleet_server_integrations_{{ minion }}:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}
- user: 947
- group: 939
- makedirs: True
{% for integration in integration_keys %}
{% if 'enabled_nodes' in optional_integrations[integration] %}
{% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %}
{% if minion in enabled_nodes %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}:
file.managed:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
- source: salt://elasticfleet/files/integrations-optional/{{ integration }}.json
- user: 947
- group: 939
- template: jinja
- defaults:
NAME: {{ minion }}
{% else %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}_delete:
file.absent:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
ea-integrations-load:
file.absent:
- name: /opt/so/state/eaintegrations.txt
- onchanges:
- file: eaintegration
- file: eadynamicintegration
- file: eapackageupgrade
- file: /opt/so/conf/elastic-fleet/integrations-optional/*
{% endif %}
{% else %}

View File

@@ -35,6 +35,7 @@ elasticfleet:
- azure
- barracuda
- carbonblack_edr
- checkpoint
- cisco_asa
- cisco_duo
- cisco_meraki
@@ -86,7 +87,15 @@ elasticfleet:
- ti_otx
- ti_recordedfuture
- udp
- vsphere
- windows
- zscaler_zia
- zscaler_zpa
- 1password
optional_integrations:
sublime_platform:
enabled_nodes: []
api_key:
base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m
limit: 100

View File

@@ -0,0 +1,44 @@
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED -%}
{%- from 'sensoroni/map.jinja' import SENSORONIMERGED -%}
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- raw -%}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "sublime-platform",
"namespace": "default",
"description": "",
"policy_id": "FleetServer_{%- endraw -%}{{ NAME }}{%- raw -%}",
"vars": {},
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"request_method": "GET",
"processors": "- drop_event:\n when:\n not:\n contains: \n message: \"flagged_rules\"\n- decode_json_fields:\n fields: [\"message\"]\n document_id: id\n target: \"\"",
"enable_request_tracer": false,
"oauth_scopes": [],
"request_transforms": "- set:\n target: header.Authorization\n value: 'Bearer {% endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.api_key }}{%- raw -%}'\n- set:\n target: header.accept\n value: application/json\n- set:\n target: url.params.last_message_created_at[gte]\n value: '[[formatDate (now (parseDuration \"-{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.poll_interval }}{%- raw -%}\")) \"2006-01-02T15:04:05Z\"]]'\n- set:\n target: url.params.reviewed\n value: false\n- set:\n target: url.params.flagged\n value: true\n- set:\n target: url.params.limit\n value: {% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.limit }}{%- raw -%}",
"response_transforms": "",
"request_redirect_headers_ban_list": [],
"request_encode_as": "application/x-www-form-urlencoded",
"request_url": "{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.base_url }}{%- raw -%}/v0/message-groups",
"response_split": "target: body.message_groups\ntype: array\nkeep_parent: false\ntransforms:\n - set:\n target: body.sublime.request_url\n value : '[[ .last_response.url.value ]]'",
"tags": [
"forwarded"
],
"pipeline": "sublime",
"data_stream.dataset": "sublime",
"request_interval": "1m"
}
}
}
}
}
}
{%- endraw -%}
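The request_transforms above reduce to a polling GET with a Bearer token and flagged/reviewed/limit query parameters. A rough curl equivalent for smoke-testing credentials outside the agent (SUBLIME_API_KEY is a placeholder; the time-window parameter is omitted):

# Approximates the integration's polling request, mirroring the transforms above.
curl -s -H "Authorization: Bearer $SUBLIME_API_KEY" \
  -H "Accept: application/json" \
  "https://api.platform.sublimesecurity.com/v0/message-groups?flagged=true&reviewed=false&limit=100"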

View File

@@ -40,3 +40,36 @@ elasticfleet:
helpLink: elastic-fleet.html
sensitive: True
advanced: True
optional_integrations:
sublime_platform:
enabled_nodes:
description: Fleet nodes with the Sublime Platform integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"
api_key:
description: API key for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
base_url:
description: Base URL for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for alerts from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
limit:
description: The maximum number of message groups to return from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: int

View File

@@ -64,8 +64,28 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Fleet Server - Optional integrations
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
elastic_fleet_integration_create "@$INTEGRATION"
fi
fi
fi
done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
else
exit $RETURN_CODE
fi

View File

@@ -8,8 +8,19 @@
INTCA=/etc/pki/tls/certs/intca.crt
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, exiting Elastic Fleet setup..."
exit 1
fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
@@ -120,3 +131,4 @@ salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0

View File

@@ -20,20 +20,12 @@
{% for NODE in ES_LOGSTASH_NODES %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %}
{% if grains.id.split('_') | last == 'manager' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client','transform']}) %}
{% else %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data_hot', 'remote_cluster_client']}) %}
{% endif %}
{% endif %}
{% elif grains.id.split('_') | last == 'searchnode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['data_hot', 'ingest']}) %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
{% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
{% elif grains.id.split('_') | last == 'heavynode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client', 'ingest']}) %}
{% endif %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
@@ -53,3 +45,5 @@
{% endif %}
{% endfor %}
{% endif %}
{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].config.node.roles}) %}

File diff suppressed because it is too large

View File

@@ -0,0 +1,34 @@
{
"description" : " Email alerts from Sublime",
"processors" : [
{ "set": { "field": "event.module", "value": "sublime" } },
{ "set": { "field": "event.dataset", "value": "alert" } },
{ "set": { "field": "event.severity", "value": 3, "override": true } },
{ "set": { "field": "rule.name", "value": "Sublime Platform: {{ flagged_rules.0.name }}", "override": true } },
{ "set": { "field": "sublime.message_group_id", "value": "{{ _id }}", "override": true } },
{ "set": { "field": "email.address", "value": "{{ messages.0.recipients.0.email }}", "override": true } },
{ "set": { "field": "email.forwarded_recipents", "value": "{{ messages.0.forwarded_receipients }}", "override": true } },
{ "set": { "field": "email.sender.address", "value": "{{ messages.0.sender.email }}", "override": true } },
{ "set": { "field": "email.subject", "value": "{{ messages.0.subject }}", "override": true } },
{ "set": { "field": "email.forwarded_at", "value": "{{ messages.0.forwarded_at }}", "override": true } },
{ "set": { "field": "email.created_at", "value": "{{ messages.0.created_at }}", "override": true } },
{ "set": { "field": "email.read_at", "value": "{{ messages.0.read_at }}", "override": true } },
{ "set": { "field": "email.replied_at", "value": "{{ messages.0.replied_at }}", "override": true } },
{
"grok": {
"field": "sublime.request_url",
"patterns": ["^https://api.%{DATA:sublime_host}/v0%{GREEDYDATA}$"],
"ignore_failure": true
}
},
{ "rename": { "field": "sublime_host", "target_field": "sublime.url", "ignore_missing": true } },
{ "rename": { "field": "data", "target_field": "sublime", "ignore_missing": true } },
{ "rename": { "field": "flagged_rules", "target_field": "sublime.flagged_rules", "ignore_missing": true } },
{ "rename": { "field": "organization_id", "target_field": "sublime.organization_id", "ignore_missing": true } },
{ "rename": { "field": "review_status", "target_field": "sublime.review_status", "ignore_missing": true } },
{ "rename": { "field": "state", "target_field": "sublime.state", "ignore_missing": true } },
{ "rename": { "field": "user_reports", "target_field": "sublime.user_reports", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}
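The processors can be sanity-checked with Elasticsearch's pipeline simulate API; a sketch assuming the pipeline is loaded as "sublime" and the referenced "common" pipeline already exists on the node:

curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -H 'Content-Type: application/json' \
  "https://localhost:9200/_ingest/pipeline/sublime/_simulate" \
  -d '{"docs":[{"_source":{"flagged_rules":[{"name":"Example Rule"}],"sublime":{"request_url":"https://api.platform.sublimesecurity.com/v0/message-groups"}}}]}'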

View File

@@ -33,7 +33,6 @@ elasticsearch:
flood_stage:
description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events.
helpLink: elasticsearch.html
script:
max_compilations_rate:
description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources.
@@ -57,32 +56,6 @@ elasticsearch:
forcedType: int
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forceType: "[]string"
multiline: True
global: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
helpLink: elasticsearch.html
refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True
@@ -100,48 +73,10 @@ elasticsearch:
description: The order to sort by. Must set index_sorting to True.
global: True
helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
policy:
phases:
hot:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
max_age:
description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True
helpLink: elasticsearch.html
actions:
@@ -160,10 +95,187 @@ elasticsearch:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
advanced: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forceType: "[]string"
multiline: True
global: True
advanced: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
advanced: True
helpLink: elasticsearch.html
refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True
advanced: True
helpLink: elasticsearch.html
number_of_shards:
description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs.
global: True
advanced: True
helpLink: elasticsearch.html
sort:
field:
description: The field to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
order:
description: The order to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
advanced: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
policy:
phases:
hot:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. This determines when the index should be moved to the warm tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
@@ -171,26 +283,31 @@ elasticsearch:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index. This determines when the index should be deleted.
global: True
advanced: True
helpLink: elasticsearch.html
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
so-logs-system_x_auth: *indexSettings
so-logs-system_x_syslog: *indexSettings
@@ -345,3 +462,19 @@ elasticsearch:
so-strelka: *indexSettings
so-syslog: *indexSettings
so-zeek: *indexSettings
so_roles:
so-manager: &soroleSettings
config:
node:
roles:
description: List of Elasticsearch roles that the node should have. Blank assumes all roles
forcedType: "[]string"
global: False
advanced: True
helpLink: elasticsearch.html
so-managersearch: *soroleSettings
so-standalone: *soroleSettings
so-searchnode: *soroleSettings
so-heavynode: *soroleSettings
so-eval: *soroleSettings
so-import: *soroleSettings
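Because a blank value means all roles, the effective assignment is easiest to confirm from the cluster itself; a sketch using the stack's existing curl config:

curl -K /opt/so/conf/elasticsearch/curl.config -s -k "https://localhost:9200/_cat/nodes?v&h=name,node.role"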

View File

@@ -9,7 +9,9 @@
. /usr/sbin/so-common
{% if GLOBALS.role != 'so-heavynode' %}
. /usr/sbin/so-elastic-fleet-common
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
. /usr/sbin/so-elastic-fleet-common
fi
{% endif %}
default_conf_dir=/opt/so/conf

View File

@@ -1,454 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
Local exit codes:
- General error: 1
- Invalid argument: 2
- File error: 3
"""
import sys, os, subprocess, argparse, signal
import copy
import re
import textwrap
import yaml
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
salt_proc: subprocess.CompletedProcess = None
def print_err(string: str):
print(string, file=sys.stderr)
def check_apply(args: dict, prompt: bool = True):
if args.apply:
print('Configuration updated. Applying changes:')
return apply()
else:
if prompt:
message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
answer = input(message)
while answer.lower() not in [ 'y', 'n', '' ]:
answer = input(message)
if answer.lower() in [ 'n', '' ]:
return 0
else:
print('Applying changes:')
return apply()
else:
return 0
def apply():
salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True']
update_cmd = ['so-rule-update']
print('Syncing config files...')
cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
if cmd.returncode == 0:
print('Updating rules...')
return subprocess.run(update_cmd).returncode
else:
return cmd.returncode
def find_minion_pillar() -> str:
regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'
result = []
for root, _, files in os.walk(minion_pillar_dir):
for f_minion_id in files:
if re.search(regex, f_minion_id):
result.append(os.path.join(root, f_minion_id))
if len(result) == 0:
print_err('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?')
sys.exit(3)
elif len(result) > 1:
res_arr = []
for r in result:
res_arr.append(f'\"{r}\"')
res_str = ', '.join(res_arr)
print_err('(This should not happen, the system is in an error state if you see this message.)\n')
print_err('More than one manager-type pillar exists, minion id\'s listed below:')
print_err(f' {res_str}')
sys.exit(3)
else:
return result[0]
def read_pillar(pillar: str):
try:
with open(pillar, 'r') as f:
loaded_yaml = yaml.safe_load(f.read())
if loaded_yaml is None:
print_err(f'Could not parse {pillar}')
sys.exit(3)
return loaded_yaml
except:
print_err(f'Could not open {pillar}')
sys.exit(3)
def write_pillar(pillar: str, content: dict):
try:
sids = content['idstools']['sids']
if sids['disabled'] is not None:
if len(sids['disabled']) == 0: sids['disabled'] = None
if sids['enabled'] is not None:
if len(sids['enabled']) == 0: sids['enabled'] = None
if sids['modify'] is not None:
if len(sids['modify']) == 0: sids['modify'] = None
with open(pillar, 'w') as f:
return yaml.dump(content, f, default_flow_style=False)
except Exception as e:
print_err(f'Could not open {pillar}')
sys.exit(3)
def check_sid_pattern(sid_pattern: str):
message = f'SID {sid_pattern} is not valid, did you forget the \"re:\" prefix for a regex pattern?'
if sid_pattern.startswith('re:'):
r_string = sid_pattern[3:]
if not valid_regex(r_string):
print_err('Invalid regex pattern.')
return False
else:
return True
else:
sid: int
try:
sid = int(sid_pattern)
except:
print_err(message)
return False
if sid >= 0:
return True
else:
print_err(message)
return False
def valid_regex(pattern: str):
try:
re.compile(pattern)
return True
except re.error:
return False
def sids_key_exists(pillar: dict, key: str):
return key in pillar.get('idstools', {}).get('sids', {})
def rem_from_sids(pillar: dict, key: str, val: str, optional = False):
pillar_dict = copy.deepcopy(pillar)
arr = pillar_dict['idstools']['sids'][key]
if arr is None or val not in arr:
if not optional: print(f'{val} already does not exist in {key}')
else:
pillar_dict['idstools']['sids'][key].remove(val)
return pillar_dict
def add_to_sids(pillar: dict, key: str, val: str, optional = False):
pillar_dict = copy.deepcopy(pillar)
if pillar_dict['idstools']['sids'][key] is None:
pillar_dict['idstools']['sids'][key] = []
if val in pillar_dict['idstools']['sids'][key]:
if not optional: print(f'{val} already exists in {key}')
else:
pillar_dict['idstools']['sids'][key].append(val)
return pillar_dict
def add_rem_disabled(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'disabled'):
pillar_dict['idstools']['sids']['disabled'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'disabled', args.sid_pattern)
if temp_pillar_dict['idstools']['sids']['disabled'] == pillar_dict['idstools']['sids']['disabled']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
if not args.remove:
if sids_key_exists(pillar_dict, 'enabled'):
pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern, optional=True)
modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
if modify is not None:
rem_candidates = []
for action in modify:
if action.startswith(f'{args.sid_pattern} '):
rem_candidates.append(action)
if len(rem_candidates) > 0:
for item in rem_candidates:
print(f' - {item}')
answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
while answer.lower() not in [ 'y', 'n', '' ]:
for item in rem_candidates:
print(f' - {item}')
answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
if answer.lower() in [ 'y', '' ]:
for item in rem_candidates:
modify.remove(item)
pillar_dict['idstools']['sids']['modify'] = modify
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_disabled_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
disabled = pillar_dict.get('idstools', {}).get('sids', {}).get('disabled')
if disabled is None:
print('No rules disabled.')
return 0
else:
print('Disabled rules:')
for rule in disabled:
print(f' - {rule}')
return 0
def add_rem_enabled(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'enabled'):
pillar_dict['idstools']['sids']['enabled'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'enabled', args.sid_pattern)
if temp_pillar_dict['idstools']['sids']['enabled'] == pillar_dict['idstools']['sids']['enabled']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
if not args.remove:
if sids_key_exists(pillar_dict, 'disabled'):
pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_enabled_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
enabled = pillar_dict.get('idstools', {}).get('sids', {}).get('enabled')
if enabled is None:
print('No rules explicitly enabled.')
return 0
else:
print('Enabled rules:')
for rule in enabled:
print(f' - {rule}')
return 0
def add_rem_modify(args: dict):
global salt_proc
if not check_sid_pattern(args.sid_pattern):
return 2
if not valid_regex(args.search_term):
print_err('Search term is not a valid regex pattern.')
string_val = f'{args.sid_pattern} \"{args.search_term}\" \"{args.replace_term}\"'
pillar_dict = read_pillar(args.pillar)
if not sids_key_exists(pillar_dict, 'modify'):
pillar_dict['idstools']['sids']['modify'] = None
if args.remove:
temp_pillar_dict = rem_from_sids(pillar_dict, 'modify', string_val)
else:
temp_pillar_dict = add_to_sids(pillar_dict, 'modify', string_val)
if temp_pillar_dict['idstools']['sids']['modify'] == pillar_dict['idstools']['sids']['modify']:
salt_proc = check_apply(args, prompt=False)
return salt_proc
else:
pillar_dict = temp_pillar_dict
# TODO: Determine if a rule should be removed from disabled if modified.
if not args.remove:
if sids_key_exists(pillar_dict, 'disabled'):
pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
write_pillar(pillar=args.pillar, content=pillar_dict)
salt_proc = check_apply(args)
return salt_proc
def list_modified_rules(args: dict):
pillar_dict = read_pillar(args.pillar)
modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
if modify is None:
print('No rules currently modified.')
return 0
else:
print('Modified rules + modifications:')
for rule in modify:
print(f' - {rule}')
return 0
def sigint_handler(*_):
print('Exiting gracefully on Ctrl-C')
if salt_proc is not None: salt_proc.send_signal(signal.SIGINT)
sys.exit(0)
def main():
signal.signal(signal.SIGINT, sigint_handler)
if os.geteuid() != 0:
print_err('You must run this script as root')
sys.exit(1)
apply_help='After updating rule configuration, apply the idstools state.'
main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
subcommand_desc = textwrap.dedent(
"""\
disabled Manage and list disabled rules (add, remove, list)
enabled Manage and list enabled rules (add, remove, list)
modify Manage and list modified rules (add, remove, list)
"""
)
subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
sid_or_regex_help = 'A valid SID (ex: "4321") or regular expression pattern (ex: "re:heartbleed|spectre")'
# Disabled actions
disabled = subparsers.add_parser('disabled')
disabled_sub = disabled.add_subparsers()
disabled_add = disabled_sub.add_parser('add')
disabled_add.set_defaults(func=add_rem_disabled)
disabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
disabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
disabled_rem = disabled_sub.add_parser('remove')
disabled_rem.set_defaults(func=add_rem_disabled, remove=True)
disabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
disabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
disabled_list = disabled_sub.add_parser('list')
disabled_list.set_defaults(func=list_disabled_rules)
# Enabled actions
enabled = subparsers.add_parser('enabled')
enabled_sub = enabled.add_subparsers()
enabled_add = enabled_sub.add_parser('add')
enabled_add.set_defaults(func=add_rem_enabled)
enabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
enabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
enabled_rem = enabled_sub.add_parser('remove')
enabled_rem.set_defaults(func=add_rem_enabled, remove=True)
enabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
enabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
enabled_list = enabled_sub.add_parser('list')
enabled_list.set_defaults(func=list_enabled_rules)
search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")'
replace_term_help='The text to replace the search term with'
# Modify actions
modify = subparsers.add_parser('modify')
modify_sub = modify.add_subparsers()
modify_add = modify_sub.add_parser('add')
modify_add.set_defaults(func=add_rem_modify)
modify_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
modify_add.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
modify_add.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
modify_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
modify_rem = modify_sub.add_parser('remove')
modify_rem.set_defaults(func=add_rem_modify, remove=True)
modify_rem.add_argument('sid_pattern', metavar='SID', help=sid_or_regex_help)
modify_rem.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
modify_rem.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
modify_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
modify_list = modify_sub.add_parser('list')
modify_list.set_defaults(func=list_modified_rules)
# Begin parse + run
args = main_parser.parse_args(sys.argv[1:])
if not hasattr(args, 'remove'):
args.remove = False
args.pillar = find_minion_pillar()
if hasattr(args, 'func'):
exit_code = args.func(args)
else:
if args.command is None:
main_parser.print_help()
else:
if args.command == 'disabled':
disabled.print_help()
elif args.command == 'enabled':
enabled.print_help()
elif args.command == 'modify':
modify.print_help()
sys.exit(0)
sys.exit(exit_code)
if __name__ == '__main__':
main()

View File

@@ -22,7 +22,7 @@ so-influxdb:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment:
- INFLUXD_CONFIG_PATH=/conf
- INFLUXD_CONFIG_PATH=/conf/config.yaml
- INFLUXDB_HTTP_LOG_ENABLED=false
- DOCKER_INFLUXDB_INIT_MODE=setup
- DOCKER_INFLUXDB_INIT_USERNAME=so

View File

@@ -0,0 +1 @@
user = "{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user', 'NO_USER_SET') }}:{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass', 'NO_PW_SET') }}"

View File

@@ -11,7 +11,10 @@ input {
}
}
filter {
if ![metadata] {
mutate {
rename => {"@metadata" => "metadata"}
}
}
}

View File

@@ -13,10 +13,11 @@ input {
filter {
if "fleet-lumberjack-input" in [tags] {
if ![metadata] {
mutate {
rename => {"@metadata" => "metadata"}
}
}
}

View File

@@ -1,13 +1,16 @@
output {
if "elastic-agent" in [tags] {
if [metadata][pipeline] {
if [metadata][_id] {
elasticsearch {
hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"]
ssl => true
ssl_certificate_verification => false
}
@@ -19,10 +22,22 @@ output {
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}"
ssl => true
ssl_certificate_verification => false
}
}
}
}
else {
elasticsearch {
hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
ssl => true
ssl_certificate_verification => false
}
}
}
}

View File

@@ -16,6 +16,7 @@ include:
- kibana.secrets
- manager.sync_es_users
- manager.elasticsearch
- manager.kibana
repo_log_dir:
file.directory:

salt/manager/kibana.sls Normal file
View File

@@ -0,0 +1,8 @@
kibana_curl_config_distributed:
file.managed:
- name: /opt/so/conf/kibana/curl.config
- source: salt://kibana/files/curl.config.template
- template: jinja
- mode: 600
- show_changes: False
- makedirs: True
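The state is pulled in through the include lists added earlier in this diff; for testing it can also be applied on its own with the same salt-call pattern used elsewhere in the commit:

salt-call state.apply manager.kibana queue=True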

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst

View File

@@ -631,8 +631,15 @@ if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# and they need to wait for ca creation to update the mine
updateMine
checkMine "network.ip_addrs"
# apply the elasticsearch state to the manager if a new searchnode was added
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
# so-common provides set_minionid, which sets MINIONID to the local minion id
set_minionid
salt $MINIONID state.apply elasticsearch queue=True --async
salt $MINIONID state.apply soc queue=True --async
fi
# run this async so the cli doesn't wait for a return
salt "$MINION_ID" state.highstate --async
salt "$MINION_ID" state.highstate --async queue=True
fi
fi

View File

@@ -403,6 +403,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
[[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
true
}
@@ -414,7 +415,8 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
true
}
@@ -429,8 +431,7 @@ post_to_2.4.4() {
}
post_to_2.4.5() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
echo "Nothing to apply"
POSTVERSION=2.4.5
}
@@ -446,6 +447,12 @@ post_to_2.4.20() {
POSTVERSION=2.4.20
}
post_to_2.4.30() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.30
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -460,7 +467,6 @@ stop_salt_master() {
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo ""
echo "Storing salt-master pid."
@@ -468,6 +474,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
}
stop_salt_minion() {
@@ -480,14 +487,12 @@ stop_salt_minion() {
echo ""
echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local
set -e
echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"
set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e
}
@@ -506,7 +511,7 @@ up_to_2.4.4() {
}
up_to_2.4.5() {
determine_elastic_agent_upgrade
echo "Nothing to do for 2.4.5"
INSTALLEDVERSION=2.4.5
}
@@ -523,6 +528,12 @@ up_to_2.4.20() {
INSTALLEDVERSION=2.4.20
}
up_to_2.4.30() {
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.30
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -568,7 +579,7 @@ update_airgap_rules() {
rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
}
update_centos_repo() {
update_airgap_repo() {
# Update the files in the repo
echo "Syncing new updates to /nsm/repo"
rsync -av $AGREPO/* /nsm/repo/
@@ -620,6 +631,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
@@ -628,22 +640,48 @@ upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [[ $OS == 'centos' ]]; then
# If rhel family
if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if oracle run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -655,7 +693,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 0
exit 1
else
echo "Salt upgrade success."
echo ""
@@ -783,9 +821,10 @@ main() {
set -e
if [[ $is_airgap -eq 0 ]]; then
update_airgap_repo
yum clean all
check_os_updates
elif [[ $OS == 'oel' ]]; then
elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
check_os_updates
@@ -802,7 +841,8 @@ main() {
echo "Hotfix applied"
update_version
enable_highstate
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -838,6 +878,14 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
# For Debian-based distros we need to stop Salt again after the upgrade; the output below is from bootstrap-salt:
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi
preupgrade_changes
@@ -848,11 +896,6 @@ main() {
update_airgap_rules
fi
# Only update the repo if its airgap
if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
update_centos_repo
fi
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
echo ""
@@ -900,7 +943,8 @@ main() {
echo ""
echo "Running a highstate. This could take several minutes."
set +e
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e
stop_salt_master
@@ -915,7 +959,8 @@ main() {
set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update

View File

@@ -0,0 +1,81 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
require_manager
# Inform user we are about to remove Elastic Fleet data
echo
echo "This script will remove the current Elastic Fleet install & all of its data and rerun Elastic Fleet setup."
echo
echo "If you would like to proceed, type AGREE and hit ENTER."
echo
# Read user input
read INPUT
if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi
status "Uninstalling all Elastic Agents on all Grid Nodes..."
salt \* cmd.run "elastic-agent uninstall -f" queue=True
status "Stopping Fleet Container..."
so-elastic-fleet-stop --force
status "Deleting Fleet Data from Pillars..."
sed -i -z "s/elasticfleet:.*grid_enrollment_heavy.*'//" /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
status "Deleting Elastic Fleet data..."
# Check to make sure that Elasticsearch is up & ready
RETURN_CODE=0
wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Elasticsearch not accessible, exiting script..."
exit 1
fi
ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
# Delete all resolved indices
for INDX in ${INDXS}
do
status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done
done
status "Restarting Kibana..."
so-kibana-restart --force
status "Checking to make sure that Kibana API is up & ready..."
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Kibana API not accessible, exiting script..."
exit 1
fi
status "Starting Elastic Fleet Setup..."
so-elastic-fleet-setup
status "Re-installing Elastic Agent on all Grid Nodes..."
salt \* state.apply elasticfleet.install_agent_grid queue=True
status "Elastic Fleet Reset complete...."

View File

@@ -5,25 +5,21 @@
{% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %}
{% set INSTALLEDSALTVERSION = grains.saltversion %}
{% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %}
{% if grains.os_family|lower == 'redhat' %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os_family|lower == 'debian' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and the saltify function in so-functions
salt:
master:
version: 3006.1
version: 3006.3

View File

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and the saltify function in so-functions
salt:
minion:
version: 3006.1
version: 3006.3
check_threshold: 3600 # in seconds; threshold used by so-salt-minion-check. Any value less than 600 seconds may cause frequent salt-minion restarts, since the job that touches the check file runs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

File diff suppressed because it is too large

View File

@@ -59,6 +59,12 @@ soc:
target: _blank
links:
- 'https://www.virustotal.com/gui/search/{value}'
- name: Sublime Platform Email Review
description: Review email in Sublime Platform
icon: fa-external-link-alt
target: _blank
links:
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
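# In these link templates, {value} expands to the value of the field the analyst
# pivots from, while the {:field.name} form substitutes a named field from the
# underlying event (semantics inferred from the entries above), so this link is
# only useful for events that carry sublime.url and sublime.message_group_id.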
eventFields:
default:
- soc_timestamp

View File

@@ -268,15 +268,6 @@ collect_dockernet() {
fi
}
collect_es_space_limit() {
whiptail_log_size_limit "$log_size_limit"
while ! valid_int "$log_size_limit"; do # Upper/lower bounds?
whiptail_invalid_input
whiptail_log_size_limit "$log_size_limit"
done
}
collect_gateway() {
whiptail_management_interface_gateway
@@ -286,28 +277,6 @@ collect_gateway() {
done
}
collect_homenet_mngr() {
whiptail_homenet_manager "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNMANAGER"; do
whiptail_invalid_input
whiptail_homenet_manager "$HNMANAGER"
done
}
collect_homenet_snsr() {
if whiptail_homenet_sensor_inherit; then
export HNSENSOR=inherit
else
whiptail_homenet_sensor "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNSENSOR"; do
whiptail_invalid_input
whiptail_homenet_sensor "$HNSENSOR"
done
fi
}
collect_hostname() {
collect_hostname_validate
@@ -346,26 +315,6 @@ collect_idh_preferences() {
if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi
}
collect_idh_services() {
whiptail_idh_services
case "$IDH_SERVICES" in
'Linux Webserver (NAS Skin)')
IDH_SERVICES='"HTTP","FTP","SSH"'
;;
'MySQL Server')
IDH_SERVICES='"MYSQL","SSH"'
;;
'MSSQL Server')
IDH_SERVICES='"MSSQL","VNC'
;;
'Custom')
whiptail_idh_services_custom
IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' )
;;
esac
}
collect_int_ip_mask() {
whiptail_management_interface_ip_mask
@@ -425,71 +374,6 @@ collect_net_method() {
fi
}
collect_ntp_servers() {
if whiptail_ntp_ask; then
[[ $is_airgap ]] && ntp_string=""
whiptail_ntp_servers "$ntp_string"
while ! valid_ntp_list "$ntp_string"; do
whiptail_invalid_input
whiptail_ntp_servers "$ntp_string"
done
IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
else
ntp_servers=()
fi
}
collect_oinkcode() {
whiptail_oinkcode
while ! valid_string "$OINKCODE" "" "128"; do
whiptail_invalid_input
whiptail_oinkcode "$OINKCODE"
done
}
collect_patch_schedule() {
whiptail_patch_schedule
case "$patch_schedule" in
'New Schedule')
whiptail_patch_schedule_select_days
whiptail_patch_schedule_select_hours
collect_patch_schedule_name_new
patch_schedule_os_new
;;
'Import Schedule')
collect_patch_schedule_name_import
;;
'Automatic')
PATCHSCHEDULENAME='auto'
;;
'Manual')
PATCHSCHEDULENAME='manual'
;;
esac
}
collect_patch_schedule_name_new() {
whiptail_patch_name_new_schedule
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_name_new_schedule "$PATCHSCHEDULENAME"
done
}
collect_patch_schedule_name_import() {
whiptail_patch_schedule_import
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_schedule_import "$PATCHSCHEDULENAME"
done
}
collect_proxy() {
[[ -n $TESTING ]] && return
local ask=${1:-true}
@@ -658,47 +542,6 @@ configure_minion() {
} >> "$setup_log" 2>&1
}
configure_ntp() {
local chrony_conf=/etc/chrony.conf
# Install chrony if it isn't already installed
if ! command -v chronyc &> /dev/null; then
logCmd "dnf -y install chrony"
fi
[[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
printf '%s\n' "# NTP server list" > $chrony_conf
# Build list of servers
for addr in "${ntp_servers[@]}"; do
echo "server $addr iburst" >> $chrony_conf
done
printf '\n%s\n' "# Config options" >> $chrony_conf
printf '%s\n' \
'driftfile /var/lib/chrony/drift' \
'makestep 1.0 3' \
'rtcsync' \
'logdir /var/log/chrony' >> $chrony_conf
if [[ $is_rpm ]]; then
systemctl enable chronyd
systemctl restart chronyd
else
systemctl enable chrony
systemctl restart chrony
fi
# Tell the chrony daemon to sync time & update the system time
# Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
printf "Syncing chrony time to server: "
chronyc -a 'burst 4/4' && sleep 30
printf "Forcing chrony to update the time: "
chronyc -a makestep && sleep 30
}
checkin_at_boot() {
local minion_config=/etc/salt/minion
@@ -719,7 +562,7 @@ check_requirements() {
req_cores=4
req_nics=2
elif [[ $is_standalone ]]; then
req_mem=24
req_mem=16
req_cores=4
req_nics=2
elif [[ $is_manager ]]; then
@@ -743,7 +586,7 @@ check_requirements() {
req_cores=4
req_nics=1
elif [[ $is_heavynode ]]; then
req_mem=24
req_mem=16
req_cores=4
req_nics=2
elif [[ $is_idh ]]; then
@@ -808,6 +651,17 @@ check_requirements() {
if [[ $total_mem_hr -lt $req_mem ]]; then
whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB"
if [[ $is_standalone || $is_heavynode ]]; then
echo "This install type will fail with less than $req_mem GB of memory. Exiting setup."
exit 0
fi
fi
if [[ $is_standalone || $is_heavynode ]]; then
if [[ $total_mem_hr -gt 15 && $total_mem_hr -lt 24 ]]; then
low_mem=true
else
low_mem=false
fi
fi
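# low_mem is consumed later by drop_install_options, which records CORECOUNT=1
# instead of the detected $lb_procs for hosts in this memory window.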
}
@@ -1055,16 +909,6 @@ download_elastic_agent_artifacts() {
fi
}
installer_progress_loop() {
local i=0
local msg="${1:-Performing background actions...}"
while true; do
[[ $i -lt 98 ]] && ((i++))
set_progress_str "$i" "$msg" nolog
[[ $i -gt 0 ]] && sleep 5s
done
}
installer_prereq_packages() {
if [[ $is_deb ]]; then
# Print message to stdout so the user knows setup is doing something
@@ -1123,9 +967,7 @@ docker_seed_registry() {
if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then
if [ "$install_type" == 'IMPORT' ]; then
container_list 'so-import'
elif [ "$install_type" == 'HELIXSENSOR' ]; then
container_list 'so-helix'
container_list 'so-import'
else
container_list
fi
@@ -1258,7 +1100,7 @@ generate_ssl() {
# If the install type is a manager, wait for the minion to be ready before
# running the ssl state, since the minion must sign the certs.
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion
(wait_for_salt_minion "$MINION_ID" "5" '/dev/stdout' || fail_setup) 2>&1 | tee -a "$setup_log"
fi
info "Applying SSL state"
logCmd "salt-call state.apply ssl -l info"
@@ -1384,7 +1226,7 @@ ls_heapsize() {
fi
case "$install_type" in
'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
@@ -1648,21 +1490,6 @@ network_setup() {
logCmd "sed -i '/\$MNIC/${INTERFACE}/g' /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable"
}
ntp_pillar_entries() {
local pillar_file=$local_salt_dir/pillar/minions/$MINION_ID.sls
if [[ ${#ntp_servers[@]} -gt 0 ]]; then
printf '%s\n'\
"ntp:"\
" servers:" > "$pillar_file"
for addr in "${ntp_servers[@]}"; do
printf '%s\n' " - '$addr'" >> "$pillar_file"
done
fi
}
parse_install_username() {
# parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -1881,7 +1708,11 @@ drop_install_options() {
echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt
NODETYPE=${install_type^^}
echo "NODETYPE=$NODETYPE" >> /opt/so/install.txt
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt
if [[ $low_mem == "true" ]]; then
echo "CORECOUNT=1" >> /opt/so/install.txt
else
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt
fi
echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt
echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt
echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt
@@ -1972,6 +1803,7 @@ securityonion_repo() {
}
repo_sync_local() {
SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
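# e.g. given "  version: 3006.3" in master.defaults.yaml, the egrep/sed pair
# above yields "3006.3"; the [0-9]{4} pattern keys on the four-digit 3006.x
# scheme, letting the salt repo pin later in this function track the declared
# version instead of a hard-coded one.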
info "Repo Sync"
if [[ $is_supported ]]; then
# Sync the repo from the SO repo locally.
@@ -2021,7 +1853,7 @@ repo_sync_local() {
curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo
rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo
curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo
dnf repolist
curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install
else
@@ -2493,20 +2325,6 @@ wait_for_file() {
return 1
}
wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
local attempt=0
# each attempt takes about 15 seconds
local maxAttempts=20
until check_salt_minion_status; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
fail_setup
fi
sleep 10
done
}
verify_setup() {
info "Verifying setup"
set -o pipefail

View File

@@ -676,7 +676,11 @@ if ! [[ -f $install_opt_file ]]; then
export MAINIP=$MAINIP
export PATCHSCHEDULENAME=$PATCHSCHEDULENAME
export INTERFACE=$INTERFACE
export CORECOUNT=$lb_procs
if [[ $low_mem == "true" ]]; then
export CORECOUNT=1
else
export CORECOUNT=$lb_procs
fi
export LSHOSTNAME=$HOSTNAME
export LSHEAP=$LS_HEAP_SIZE
export CPUCORES=$num_cpu_cores
@@ -768,8 +772,11 @@ if ! [[ -f $install_opt_file ]]; then
info "Restarting SOC to pick up initial user"
logCmd "so-soc-restart"
title "Setting up Elastic Fleet"
logCmd "salt-call state.apply elasticfleet.config"
logCmd "so-elastic-fleet-setup"
logCmd "salt-call state.apply elasticfleet.config"
if ! logCmd so-elastic-fleet-setup; then
error "Failed to run so-elastic-fleet-setup"
fail_setup
fi
if [[ ! $is_import ]]; then
title "Setting up Playbook"
logCmd "so-playbook-reset"

View File

@@ -25,7 +25,8 @@ log_has_errors() {
# Ignore Salt Master cached public key and minion failed to auth because this is a test
# to see if the salt key had already been accepted.
# Ignore failed to connect to ::1 since we have most curls wrapped in a retry.
# Ignore failed to connect to since we have most curls wrapped in a retry and there are
# multiple mirrors available.
# Ignore perl-Error- since that is the name of a Perl package SO installs.
@@ -35,11 +36,13 @@ log_has_errors() {
# This is ignored for Ubuntu
# Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target
# may be requested by dependency only (it is configured to refuse manual start/stop).
# Exit code 100 failure is likely apt-get running in the background; we wait for it to unlock.
grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \
grep -vE "The Salt Master has cached the public key for this node" | \
grep -vE "Minion failed to authenticate with the master" | \
grep -vE "Failed to connect to ::1" | \
grep -vE "Failed to connect to " | \
grep -vE "Failed to set locale" | \
grep -vE "perl-Error-" | \
grep -vE "Failed:\s*?[0-9]+" | \
@@ -54,11 +57,15 @@ log_has_errors() {
grep -vE "Login Failed Details" | \
grep -vE "response from daemon: unauthorized" | \
grep -vE "Reading first line of patchfile" | \
grep -vE "Command failed with exit code 100; will retry" | \
grep -vE "Running scope as unit" &> "$error_log"
if [[ $? -eq 0 ]]; then
# This function succeeds (returns 0) if errors are detected
return 0
fi
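# Worked example of the inverted contract (illustrative strings only):
#   printf 'FAILED x\n' | grep -E 'FAILED|Failed' | grep -vE 'known-noise'; echo $?   # 0 -> error kept
#   printf 'Failed to connect to ::1\n' | grep -E 'FAILED|Failed' | grep -vE 'Failed to connect to '; echo $?   # 1 -> filtered, clean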
# No errors found, return 1 (function failed to find errors)
return 1
}
@@ -117,7 +124,10 @@ main() {
echo "WARNING: Failed setup a while ago"
exit_code=1
elif log_has_errors; then
echo "WARNING: Errors detected during setup"
echo "WARNING: Errors detected during setup."
echo "--------- ERRORS ---------"
cat $error_log
echo "--------------------------"
exit_code=1
touch /root/failure
elif using_iso && cron_error_in_mail_spool; then