Merge branch '2.4/dev' into desktop

Mike Reeves
2023-06-26 15:36:48 -04:00
69 changed files with 1849 additions and 3413 deletions

View File

@@ -5,22 +5,22 @@ Security Onion 2.4 Release Candidate 1 (RC1) is here!
## Screenshots ## Screenshots
Alerts Alerts
![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/39_alerts.png) ![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
Dashboards Dashboards
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/40_dashboards.png) ![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png)
Hunt Hunt
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/41_hunt.png) ![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png)
PCAP PCAP
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/42_pcap.png) ![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png)
Grid Grid
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/46_grid.png) ![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png)
Config Config
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_config.png) ![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png)
### Release Notes ### Release Notes

View File

@@ -46,6 +46,7 @@
'pcap', 'pcap',
'suricata', 'suricata',
'healthcheck', 'healthcheck',
'elasticagent',
'schedule', 'schedule',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean'

View File

@@ -8,6 +8,15 @@ soup_scripts:
- source: salt://common/tools/sbin - source: salt://common/tools/sbin
- include_pat: - include_pat:
- so-common - so-common
- so-firewall
- so-image-common - so-image-common
- soup
soup_manager_scripts:
file.recurse:
- name: /usr/sbin
- user: root
- group: root
- file_mode: 755
- source: salt://manager/tools/sbin
- include_pat:
- so-firewall
- soup

View File

@@ -43,6 +43,7 @@ desktop_packages:
- bpftool - bpftool
- bzip2 - bzip2
- chkconfig - chkconfig
- chromium
- chrony - chrony
- cinnamon - cinnamon
- cinnamon-control-center - cinnamon-control-center
@@ -67,6 +68,7 @@ desktop_packages:
- dosfstools - dosfstools
- dracut-config-rescue - dracut-config-rescue
- dracut-live - dracut-live
- dsniff
- e2fsprogs - e2fsprogs
- ed - ed
- efi-filesystem - efi-filesystem
@@ -192,6 +194,7 @@ desktop_packages:
- nemo-preview - nemo-preview
- net-tools - net-tools
- netronome-firmware - netronome-firmware
- ngrep
- nm-connection-editor - nm-connection-editor
- nmap-ncat - nmap-ncat
- nvme-cli - nvme-cli
@@ -220,6 +223,7 @@ desktop_packages:
- psacct - psacct
- pt-sans-fonts - pt-sans-fonts
- python3-libselinux - python3-libselinux
- python3-scapy
- qemu-guest-agent - qemu-guest-agent
- quota - quota
- realmd - realmd
@@ -251,6 +255,7 @@ desktop_packages:
- smc-meera-fonts - smc-meera-fonts
- sos - sos
- spice-vdagent - spice-vdagent
- ssldump
- sssd - sssd
- sssd-common - sssd-common
- sssd-kcm - sssd-kcm
@@ -263,6 +268,7 @@ desktop_packages:
- systemd-udev - systemd-udev
- tar - tar
- tcpdump - tcpdump
- tcpflow
- teamd - teamd
- thai-scalable-waree-fonts - thai-scalable-waree-fonts
- time - time
@@ -282,8 +288,10 @@ desktop_packages:
- vim-powerline - vim-powerline
- virt-what - virt-what
- wget - wget
- whois
- which - which
- wireplumber - wireplumber
- wireshark
- words - words
- xdg-user-dirs-gtk - xdg-user-dirs-gtk
- xed - xed
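The desktop state now pulls in several network-analysis tools (chromium, dsniff, ngrep, python3-scapy, ssldump, tcpflow, whois, wireshark). A minimal verification sketch, not part of the change, for confirming they landed on a desktop node after the state runs:

#!/bin/bash
# Verify the analyst tools added to desktop_packages are installed (verification sketch).
for pkg in chromium dsniff ngrep python3-scapy ssldump tcpflow whois wireshark; do
  rpm -q "$pkg" >/dev/null 2>&1 && echo "OK: $pkg" || echo "MISSING: $pkg"
done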

View File

@@ -178,6 +178,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-elastic-agent':
final_octet: 46
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-telegraf': 'so-telegraf':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
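The new 'so-elastic-agent' entry assigns final_octet 46, which the docker map uses to derive the container's address on the sobridge network (the elasticagent enabled state below binds it via DOCKER.containers['so-elastic-agent'].ip). A quick sketch, assuming the container is running, for checking which address it actually received:

#!/bin/bash
# Show the sobridge IP assigned to so-elastic-agent (verification sketch).
docker inspect so-elastic-agent \
  --format '{{ .NetworkSettings.Networks.sobridge.IPAddress }}'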

View File

@@ -0,0 +1,47 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if sls.split('.')[0] in allowed_states %}
# Add EA Group
elasticagentgroup:
group.present:
- name: elastic-agent
- gid: 949
# Add EA user
elastic-agent:
user.present:
- uid: 949
- gid: 949
- home: /opt/so/conf/elastic-agent
- createhome: False
elasticagentconfdir:
file.directory:
- name: /opt/so/conf/elastic-agent
- user: 949
- group: 939
- makedirs: True
# Create config
create-elastic-agent-config:
file.managed:
- name: /opt/so/conf/elastic-agent/elastic-agent.yml
- source: salt://elasticagent/files/elastic-agent.yml.jinja
- user: 949
- group: 939
- template: jinja
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
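Once this state is applied (for example on a heavy node, where setup enables the elasticagent pillar), the account and config directory it manages can be checked from the shell. A small verification sketch, assuming a standard Salt minion install:

#!/bin/bash
# Apply just the elasticagent state and confirm the user, group, and config it manages.
sudo salt-call state.apply elasticagent queue=True
id elastic-agent                                    # expect uid=949, gid=949
ls -ld /opt/so/conf/elastic-agent                   # directory owned by uid 949
ls -l /opt/so/conf/elastic-agent/elastic-agent.yml  # rendered from the jinja template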

View File

@@ -0,0 +1,2 @@
elasticagent:
enabled: False
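elasticagent is disabled by default and only turns on when the minion pillar overrides it; the add_elastic_agent_to_minion function added to so-minion later in this commit writes exactly that override for heavy nodes. A sketch of checking and setting the override by hand, with a placeholder pillar file name:

#!/bin/bash
# Show the effective setting, then enable it in a minion pillar (sketch;
# replace the pillar file path with the minion you are targeting).
sudo salt-call pillar.get elasticagent:enabled
printf '%s\n' \
  "elasticagent:" \
  "  enabled: True" | sudo tee -a /opt/so/saltstack/local/pillar/minions/example_heavynode.sls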

View File

@@ -0,0 +1,27 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- elasticagent.sostatus
so-elastic-agent:
docker_container.absent:
- force: True
so-elastic-agent_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-elastic-agent$
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
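When the disabled branch runs it force-removes the container and comments out the so-status entry, so so-status stops reporting on it. A quick check of both effects (verification sketch):

#!/bin/bash
# Confirm the container is gone and its so-status entry is commented out.
docker ps -a --filter name=so-elastic-agent --format '{{ .Names }} {{ .Status }}'
grep -n 'so-elastic-agent' /opt/so/conf/so-status/so-status.conf   # expect a leading '#'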

View File

@@ -0,0 +1,62 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- elasticagent.config
- elasticagent.sostatus
so-elastic-agent:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent:{{ GLOBALS.so_version }}
- name: so-elastic-agent
- hostname: {{ GLOBALS.hostname }}
- detach: True
- user: 949
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-agent'].ip }}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-elastic-agent'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastic-agent'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/elastic-agent/elastic-agent.yml:/usr/share/elastic-agent/elastic-agent.yml:ro
- /nsm:/nsm:ro
{% if DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elastic-agent'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-elastic-agent'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-elastic-agent_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-elastic-agent$
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
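The enabled branch starts so-elastic-agent with the rendered elastic-agent.yml mounted read-only, /nsm available for the log inputs, and the so-status line uncommented. A verification sketch after a highstate:

#!/bin/bash
# Confirm the agent container is up and the expected bind mounts are present.
docker ps --filter name=so-elastic-agent --format '{{ .Names }}: {{ .Status }}'
docker inspect so-elastic-agent \
  --format '{{ range .Mounts }}{{ .Source }} -> {{ .Destination }}{{ "\n" }}{{ end }}'
grep '^so-elastic-agent$' /opt/so/conf/so-status/so-status.conf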

View File

@@ -0,0 +1,119 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
id: aea1ba80-1065-11ee-a369-97538913b6a9
revision: 2
outputs:
default:
type: elasticsearch
hosts:
- 'https://{{ GLOBALS.hostname }}:9200'
username: '{{ ES_USER }}'
password: '{{ ES_PASS }}'
ssl.verification_mode: none
output_permissions: {}
agent:
download:
sourceURI: 'http://{{ GLOBALS.manager }}:8443/artifacts/'
monitoring:
enabled: false
logs: false
metrics: false
features: {}
inputs:
- id: logfile-logs-80ffa884-2cfc-459a-964a-34df25714d85
name: suricata-logs
revision: 1
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 80ffa884-2cfc-459a-964a-34df25714d85
streams:
- id: logfile-log.log-80ffa884-2cfc-459a-964a-34df25714d85
data_stream:
dataset: suricata
paths:
- /nsm/suricata/eve*.json
processors:
- add_fields:
target: event
fields:
category: network
module: suricata
pipeline: suricata.common
- id: logfile-logs-90103ac4-f6bd-4a4a-b596-952c332390fc
name: strelka-logs
revision: 1
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 90103ac4-f6bd-4a4a-b596-952c332390fc
streams:
- id: logfile-log.log-90103ac4-f6bd-4a4a-b596-952c332390fc
data_stream:
dataset: strelka
paths:
- /nsm/strelka/log/strelka.log
processors:
- add_fields:
target: event
fields:
category: file
module: strelka
pipeline: strelka.file
- id: logfile-logs-6197fe84-9b58-4d9b-8464-3d517f28808d
name: zeek-logs
revision: 1
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 6197fe84-9b58-4d9b-8464-3d517f28808d
streams:
- id: logfile-log.log-6197fe84-9b58-4d9b-8464-3d517f28808d
data_stream:
dataset: zeek
paths:
- /nsm/zeek/logs/current/*.log
processors:
- dissect:
tokenizer: '/nsm/zeek/logs/current/%{pipeline}.log'
field: log.file.path
trim_chars: .log
target_prefix: ''
- script:
lang: javascript
source: |
function process(event) {
var pl = event.Get("pipeline");
event.Put("@metadata.pipeline", "zeek." + pl);
}
- add_fields:
target: event
fields:
category: network
module: zeek
- add_tags:
tags: ics
when:
regexp:
pipeline: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
exclude_files:
- >-
broker|capture_loss|cluster|ecat_arp_info|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout.log$
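This standalone agent policy reads Suricata EVE, Strelka, and Zeek logs from /nsm and routes each Zeek log to a zeek.<logname> ingest pipeline derived from the file name by the dissect tokenizer (for example, conn.log is sent to the zeek.conn pipeline). The Elasticsearch credentials come from the so_elastic_user pillar; a sketch for spot-checking the rendered config on a node running the agent:

#!/bin/bash
# Spot-check the rendered agent config (assumes the elasticagent state has run).
sudo salt-call pillar.get elasticsearch:auth:users:so_elastic_user:user
sudo grep -A6 'outputs:' /opt/so/conf/elastic-agent/elastic-agent.yml   # hosts/username filled in
sudo grep -A1 'paths:' /opt/so/conf/elastic-agent/elastic-agent.yml     # /nsm/... log paths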

View File

@@ -0,0 +1,13 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'elasticagent/map.jinja' import ELASTICAGENTMERGED %}
include:
{% if ELASTICAGENTMERGED.enabled %}
- elasticagent.enabled
{% else %}
- elasticagent.disabled
{% endif %}

View File

@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'elasticagent/defaults.yaml' as ELASTICAGENTDEFAULTS %}
{% set ELASTICAGENTMERGED = salt['pillar.get']('elasticagent', ELASTICAGENTDEFAULTS.elasticagent, merge=True) %}

View File

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
append_so-elastic-agent_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-elastic-agent
- unless: grep -q so-elastic-agent$ /opt/so/conf/so-status/so-status.conf
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
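The unless guard makes the append idempotent: the line is only added when a grep for so-elastic-agent at end-of-line finds nothing, so repeated highstates do not duplicate it. The shell equivalent of what this state does, for illustration only:

#!/bin/bash
# Idempotent append, mirroring the file.append + unless pattern above.
conf=/opt/so/conf/so-status/so-status.conf
grep -q 'so-elastic-agent$' "$conf" || echo 'so-elastic-agent' | sudo tee -a "$conf"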

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-restart elastic-agent $1

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-start elastic-agent $1

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
/usr/sbin/so-stop elastic-agent $1
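These three wrappers just source so-common and delegate to so-start, so-stop, and so-restart with the elastic-agent container name, passing any extra argument straight through. Typical usage, assuming so-status is available on the node (sketch):

#!/bin/bash
# Restart the agent container and check that it comes back (usage sketch).
sudo so-elastic-agent-restart
sudo so-status | grep -i elastic-agent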

View File

@@ -8,13 +8,13 @@
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
# Add EA Group # Add EA Group
elasticsagentgroup: elasticfleetgroup:
group.present: group.present:
- name: elastic-agent - name: elastic-fleet
- gid: 947 - gid: 947
# Add EA user # Add EA user
elastic-agent: elastic-fleet:
user.present: user.present:
- uid: 947 - uid: 947
- gid: 947 - gid: 947

View File

@@ -23,3 +23,11 @@ elasticfleet:
- stats - stats
- stderr - stderr
- stdout - stdout
packages:
- aws
- azure
- cloudflare
- fim
- github
- google_workspace
- 1password
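The new elasticfleet:packages list drives so-elastic-fleet-package-load (added later in this commit), which looks up each package's current version through the Fleet EPM API and installs it as name-version. A hedged sketch for listing the install status of each package, using the same curl.config auth and Kibana session cookie the helpers use:

#!/bin/bash
# Report Fleet install status for each package named above (verification sketch).
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
for pkg in aws azure cloudflare fim github google_workspace 1password; do
  status=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L \
    -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$pkg" | jq -r '.item.status')
  echo "$pkg: $status"
done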

View File

@@ -8,7 +8,7 @@
"name": "import-zeek-logs", "name": "import-zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek Import logs", "description": "Zeek Import logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -9,7 +9,7 @@
"name": "zeek-logs", "name": "zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -0,0 +1,106 @@
{
"package": {
"name": "elasticsearch",
"version": ""
},
"name": "elasticsearch-logs",
"namespace": "default",
"description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"elasticsearch-logfile": {
"enabled": true,
"streams": {
"elasticsearch.audit": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_audit.json"
]
}
},
"elasticsearch.deprecation": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_deprecation.json"
]
}
},
"elasticsearch.gc": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/gc.log.[0-9]*",
"/var/log/elasticsearch/gc.log"
]
}
},
"elasticsearch.server": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/elasticsearch/*.log"
]
}
},
"elasticsearch.slowlog": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_index_search_slowlog.json",
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
]
}
}
}
},
"elasticsearch-elasticsearch/metrics": {
"enabled": false,
"vars": {
"hosts": [
"http://localhost:9200"
],
"scope": "node"
},
"streams": {
"elasticsearch.stack_monitoring.ccr": {
"enabled": false
},
"elasticsearch.stack_monitoring.cluster_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.enrich": {
"enabled": false
},
"elasticsearch.stack_monitoring.index": {
"enabled": false
},
"elasticsearch.stack_monitoring.index_recovery": {
"enabled": false,
"vars": {
"active.only": true
}
},
"elasticsearch.stack_monitoring.index_summary": {
"enabled": false
},
"elasticsearch.stack_monitoring.ml_job": {
"enabled": false
},
"elasticsearch.stack_monitoring.node": {
"enabled": false
},
"elasticsearch.stack_monitoring.node_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.pending_tasks": {
"enabled": false
},
"elasticsearch.stack_monitoring.shard": {
"enabled": false
}
}
}
}
}

View File

@@ -6,7 +6,7 @@
"name": "idh-logs", "name": "idh-logs",
"namespace": "so", "namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so", "namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"vars": {}, "vars": {},
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {

View File

@@ -6,7 +6,7 @@
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so", "namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"tags": ["so-kratos"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
"custom": "pipeline: kratos"
}
}
}
}
}
}
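Each JSON document under grid-nodes_general is a Fleet package-policy body keyed to the so-grid-nodes_general agent policy; so-elastic-fleet-integration-policy-load (updated further down) walks the directory and creates or updates each one through the helpers in so-elastic-fleet-common. Pushing a single file by hand looks roughly like this sketch, where the file name is assumed to match the integration name:

#!/bin/bash
# Create or update one grid-node integration from its JSON definition (sketch).
. /usr/sbin/so-elastic-fleet-common
INTEGRATION=/opt/so/conf/elastic-fleet/integrations/grid-nodes_general/kratos-logs.json
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
  elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
  elastic_fleet_integration_create "@$INTEGRATION"
fi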

View File

@@ -0,0 +1,20 @@
{
"package": {
"name": "osquery_manager",
"version": ""
},
"name": "osquery-grid-nodes",
"namespace": "default",
"policy_id": "so-grid-nodes_general",
"inputs": {
"osquery_manager-osquery": {
"enabled": true,
"streams": {
"osquery_manager.result": {
"enabled": true,
"vars": {}
}
}
}
}
}

View File

@@ -0,0 +1,76 @@
{
"package": {
"name": "redis",
"version": ""
},
"name": "redis-logs",
"namespace": "default",
"description": "Redis logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"redis-logfile": {
"enabled": true,
"streams": {
"redis.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/redis/redis.log"
],
"tags": [
"redis-log"
],
"preserve_original_event": false
}
}
}
},
"redis-redis": {
"enabled": false,
"streams": {
"redis.slowlog": {
"enabled": false,
"vars": {
"hosts": [
"127.0.0.1:6379"
],
"password": ""
}
}
}
},
"redis-redis/metrics": {
"enabled": false,
"vars": {
"hosts": [
"127.0.0.1:6379"
],
"idle_timeout": "20s",
"maxconn": 10,
"network": "tcp",
"password": ""
},
"streams": {
"redis.info": {
"enabled": false,
"vars": {
"period": "10s"
}
},
"redis.key": {
"enabled": false,
"vars": {
"key.patterns": "- limit: 20\n pattern: *\n",
"period": "10s"
}
},
"redis.keyspace": {
"enabled": false,
"vars": {
"period": "10s"
}
}
}
}
}
}

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sync.log"
],
"data_stream.dataset": "soc",
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"custom": "pipeline: common"
}
}
}
}
}
}

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/salt-relay.log"
],
"data_stream.dataset": "soc",
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"custom": "pipeline: common"
}
}
}
}
}
}

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/sensoroni/sensoroni.log"
],
"data_stream.dataset": "soc",
"tags": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
}
}

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sensoroni-server.log"
],
"data_stream.dataset": "soc",
"tags": ["so-soc"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
}
}

View File

@@ -6,7 +6,7 @@
"name": "strelka-logs", "name": "strelka-logs",
"namespace": "so", "namespace": "so",
"description": "Strelka logs", "description": "Strelka logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so", "namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "syslog-tcp-514", "name": "syslog-tcp-514",
"namespace": "so", "namespace": "so",
"description": "Syslog Over TCP Port 514", "description": "Syslog Over TCP Port 514",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"tcp-tcp": { "tcp-tcp": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "syslog-udp-514", "name": "syslog-udp-514",
"namespace": "so", "namespace": "so",
"description": "Syslog over UDP Port 514", "description": "Syslog over UDP Port 514",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"udp-udp": { "udp-udp": {
"enabled": true, "enabled": true,

View File

@@ -0,0 +1,40 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "system",
"version": ""
},
"name": "system-grid-nodes",
"namespace": "default",
"inputs": {
"system-logfile": {
"enabled": true,
"streams": {
"system.auth": {
"enabled": true,
"vars": {
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
]
}
},
"system.syslog": {
"enabled": true,
"vars": {
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
]
}
}
}
},
"system-winlog": {
"enabled": false
},
"system-system/metrics": {
"enabled": false
}
}
}

View File

@@ -6,7 +6,7 @@
"name": "elasticsearch-logs", "name": "elasticsearch-logs",
"namespace": "default", "namespace": "default",
"description": "Elasticsearch Logs", "description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"elasticsearch-logfile": { "elasticsearch-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so", "namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -5,7 +5,7 @@
}, },
"name": "osquery-grid-nodes", "name": "osquery-grid-nodes",
"namespace": "default", "namespace": "default",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"osquery_manager-osquery": { "osquery_manager-osquery": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "redis-logs", "name": "redis-logs",
"namespace": "default", "namespace": "default",
"description": "Redis logs", "description": "Redis logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"redis-logfile": { "redis-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so", "namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so", "namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so", "namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -6,7 +6,7 @@
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so", "namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
"enabled": true, "enabled": true,

View File

@@ -1,5 +1,5 @@
{ {
"policy_id": "so-grid-nodes", "policy_id": "so-grid-nodes_heavy",
"package": { "package": {
"name": "system", "name": "system",
"version": "" "version": ""

View File

@@ -2,15 +2,24 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{%- set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token') -%} {%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% if not AGENT_STATUS %} {% if not AGENT_STATUS %}
{% if grains.role not in ['so-heavynode'] %}
run_installer: run_installer:
cmd.script: cmd.script:
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64 - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so - cwd: /opt/so
- args: -token={{ GRIDNODETOKEN }} - args: -token={{ GRIDNODETOKENGENERAL }}
{% else %}
run_installer:
cmd.script:
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so
- args: -token={{ GRIDNODETOKENHEAVY }}
{% endif %}
{% endif %} {% endif %}
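The agent installer state now picks the enrollment token by role: heavy nodes enroll with the heavy token, everything else with the general token, both read from the global pillar written by so-elastic-fleet-setup. A quick sketch for confirming both tokens are present on the manager:

#!/bin/bash
# Confirm both grid enrollment tokens made it into the global pillar.
sudo salt-call pillar.get global:fleet_grid_enrollment_token_general
sudo salt-call pillar.get global:fleet_grid_enrollment_token_heavy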

View File

@@ -51,6 +51,21 @@ elastic_fleet_integration_update() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
} }
elastic_fleet_package_version_check() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
}
elastic_fleet_package_install() {
PKGKEY=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PKGKEY"
}
elastic_fleet_package_is_installed() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
}
elastic_fleet_policy_create() { elastic_fleet_policy_create() {
NAME=$1 NAME=$1
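The three new helpers wrap the Fleet EPM endpoints: a version lookup, an install call that takes a package-version key, and an installed-status check, all authenticated with the shared curl.config plus the Kibana session cookie. Example use with an illustrative package name, setting SESSIONCOOKIE the same way so-elasticsearch-templates-load does:

#!/bin/bash
# Install an integration package at its current version, then verify it (sketch).
. /usr/sbin/so-elastic-fleet-common
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
VERSION=$(elastic_fleet_package_version_check "github")
elastic_fleet_package_install "github-$VERSION"
elastic_fleet_package_is_installed "github"        # prints "installed" on success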

View File

@@ -25,11 +25,30 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
fi fi
done done
# Grid Nodes # Grid Nodes - General
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json
do do
printf "\n\nGrid Nodes Policy - Loading $INTEGRATION\n" printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
elastic_fleet_integration_create "@$INTEGRATION"
fi
fi
done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Grid Nodes - Heavy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json
do
printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION" elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-elastic-fleet-common
{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}-$VERSION"
echo
{%- endfor %}
echo

View File

@@ -48,6 +48,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
printf "\n\n" printf "\n\n"
### Create Policies & Associated Integration Configuration ### ### Create Policies & Associated Integration Configuration ###
# Load packages
/usr/sbin/so-elastic-fleet-package-load
# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load
# Manager Fleet Server Host # Manager Fleet Server Host
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120" elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"
@@ -62,8 +67,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fle
# Initial Endpoints Policy # Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600" elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
# Grid Nodes Policy # Grid Nodes - General Policy
elastic_fleet_policy_create "so-grid-nodes" "SO Grid Node Policy" "false" "1209600" elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
# Grid Nodes - Heavy Node Policy
elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
# Load Integrations for default policies # Load Integrations for default policies
so-elastic-fleet-integration-policy-load so-elastic-fleet-integration-policy-load
@@ -81,7 +89,8 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
# Query for Enrollment Tokens for default policies # Query for Enrollment Tokens for default policies
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key') ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
GRIDNODESENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes")) | .api_key') GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
# Store needed data in minion pillar # Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
@@ -92,13 +101,15 @@ printf '%s\n'\
" server:"\ " server:"\
" es_token: '$ESTOKEN'"\ " es_token: '$ESTOKEN'"\
" endpoints_enrollment: '$ENDPOINTSENROLLMENTOKEN'"\ " endpoints_enrollment: '$ENDPOINTSENROLLMENTOKEN'"\
" grid_enrollment: '$GRIDNODESENROLLMENTOKEN'"\ " grid_enrollment_general: '$GRIDNODESENROLLMENTOKENGENERAL'"\
" grid_enrollment_heavy: '$GRIDNODESENROLLMENTOKENHEAVY'"\
"" >> "$pillar_file" "" >> "$pillar_file"
#Store Grid Nodes Enrollment token in Global pillar #Store Grid Nodes Enrollment token in Global pillar
global_pillar_file=/opt/so/saltstack/local/pillar/global/soc_global.sls global_pillar_file=/opt/so/saltstack/local/pillar/global/soc_global.sls
printf '%s\n'\ printf '%s\n'\
" fleet_grid_enrollment_token: '$GRIDNODESENROLLMENTOKEN'"\ " fleet_grid_enrollment_token_general: '$GRIDNODESENROLLMENTOKENGENERAL'"\
" fleet_grid_enrollment_token_heavy: '$GRIDNODESENROLLMENTOKENHEAVY'"\
"" >> "$global_pillar_file" "" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State # Call Elastic-Fleet Salt State

View File

@@ -1,13 +1,23 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %} {% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES with context %}
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %} {% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes are removed #}
{% set ES_LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type != 'heavynode' %}
{% for hostname in node_data[node_type].keys() %}
{% do ES_LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% endfor %}
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %} {% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
{% if LOGSTASH_NODES | length > 1 %} {% if ES_LOGSTASH_NODES | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
{% for NODE in LOGSTASH_NODES %} {% for NODE in ES_LOGSTASH_NODES %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %} {% endfor %}
{% if grains.id.split('_') | last == 'manager' %} {% if grains.id.split('_') | last == 'manager' %}
@@ -22,6 +32,8 @@
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
{% endif %} {% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
{% elif grains.id.split('_') | last == 'heavynode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client', 'ingest']}) %}
{% endif %} {% endif %}
{% if HIGHLANDER %} {% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
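ES_LOGSTASH_NODES is built from the same logstash:nodes pillar as LOGSTASH_NODES but drops heavy nodes, so heavy nodes no longer appear in discovery.seed_hosts and instead get a local master/data/remote_cluster_client/ingest role set. A sketch for inspecting the node map the loop iterates and the rendered discovery settings, assuming the usual Security Onion config location:

#!/bin/bash
# Inspect the pillar the Jinja above iterates, then the rendered seed_hosts
# (adjust the config path if your layout differs).
sudo salt-call pillar.get logstash:nodes
sudo grep -R -A3 'seed_hosts' /opt/so/conf/elasticsearch/ 2>/dev/null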

File diff suppressed because it is too large.

View File

@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %} {% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ES_LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
@@ -32,7 +33,7 @@ so-elasticsearch:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- environment: - environment:
{% if LOGSTASH_NODES | length == 1 %} {% if ES_LOGSTASH_NODES | length == 1 or GLOBALS.role == 'so-heavynode' %}
- discovery.type=single-node - discovery.type=single-node
{% endif %} {% endif %}
- ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true - ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true
@@ -144,6 +145,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
so-es-cluster-settings: so-es-cluster-settings:
cmd.run: cmd.run:
- name: /usr/sbin/so-elasticsearch-cluster-settings - name: /usr/sbin/so-elasticsearch-cluster-settings
@@ -152,6 +154,7 @@ so-es-cluster-settings:
- require: - require:
- docker_container: so-elasticsearch - docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja - file: elasticsearch_sbin_jinja
{% endif %}
so-elasticsearch-ilm-policy-load: so-elasticsearch-ilm-policy-load:
cmd.run: cmd.run:

View File

@@ -1,63 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
default_conf_dir=/opt/so/conf
# Define a default directory to load pipelines from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null --silent --head --fail
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
set -e
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
for i in *; do TEMPLATE=${i::-5}; echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
for i in *; do TEMPLATE=${i::-14}; echo "$TEMPLATE"; so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
cd - >/dev/null

View File

@@ -3,6 +3,8 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
. /usr/sbin/so-common . /usr/sbin/so-common
@@ -14,17 +16,17 @@ COUNT=0
ELASTICSEARCH_CONNECTED="no" ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do while [[ "$COUNT" -le 30 ]]; do
curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://localhost:"$ELASTICSEARCH_PORT" curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://localhost:"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes" ELASTICSEARCH_CONNECTED="yes"
echo "connected!" echo "connected!"
break break
else else
((COUNT+=1)) ((COUNT+=1))
sleep 1 sleep 1
echo -n "." echo -n "."
fi fi
done done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'docker ps' \n -running 'sudo so-elastic-restart'" echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'docker ps' \n -running 'sudo so-elastic-restart'"
echo echo
@@ -32,9 +34,12 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
exit exit
fi fi
# Check to see if config already exists {%- if GLOBALS.role in GLOBALS.manager_roles %}
CLUSTER_SETTINGS=$(so-elasticsearch-query _cluster/settings | jq .persistent.cluster.remote)
if [[ ! -z "$CLUSTER_SETTINGS" ]]; then
echo "Applying cross cluster search config..." echo "Applying cross cluster search config..."
so-elasticsearch-query _cluster/settings -d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ GLOBALS.manager }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" -XPUT so-elasticsearch-query _cluster/settings -d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ GLOBALS.manager }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}" -XPUT
fi {%- if node_data['heavynode'] is defined %}
{%- for hostname, node_details in node_data['heavynode'].items() %}
so-elasticsearch-query _cluster/settings -d "{\"persistent\": {\"cluster\": {\"remote\": {\"{{ hostname }}\": {\"seeds\": [\"{{node_details.ip}}:9300\"]}}}}}" -XPUT
{%- endfor %}
{%- endif %}
{%- endif %}
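With this change the manager-side script registers every heavy node from the logstash:nodes pillar as a remote cluster, seeded by its node IP on port 9300, which is what lets cross-cluster search reach data stored locally on heavy nodes. Verifying the result uses the same query the removed guard used (sketch):

#!/bin/bash
# Show the cross-cluster search remotes after so-elasticsearch-cluster-settings runs.
sudo so-elasticsearch-query _cluster/settings | jq '.persistent.cluster.remote'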

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common
default_conf_dir=/opt/so/conf
# Define a default directory to load pipelines from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null --silent --head --fail
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
set -e
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
for i in *; do TEMPLATE=${i::-5}; echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
for i in *; do TEMPLATE=${i::-14}; echo "$TEMPLATE"; so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
else
echo "Elastic Fleet not configured. Exiting..."
exit 0
fi
cd - >/dev/null

View File

@@ -417,6 +417,14 @@ firewall:
- elastic_agent_control - elastic_agent_control
- elastic_agent_data - elastic_agent_data
- elastic_agent_update - elastic_agent_update
receiver:
portgroups:
- yum
- docker_registry
- influxdb
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
self: self:
portgroups: portgroups:
- syslog - syslog
@@ -486,6 +494,9 @@ firewall:
heavynode: heavynode:
portgroups: portgroups:
- salt_manager - salt_manager
receiver:
portgroups:
- salt_manager
customhostgroup0: customhostgroup0:
portgroups: [] portgroups: []
customhostgroup1: customhostgroup1:
@@ -569,6 +580,14 @@ firewall:
- elastic_agent_control - elastic_agent_control
- elastic_agent_data - elastic_agent_data
- elastic_agent_update - elastic_agent_update
receiver:
portgroups:
- yum
- docker_registry
- influxdb
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
self: self:
portgroups: portgroups:
- syslog - syslog
@@ -638,6 +657,9 @@ firewall:
heavynode: heavynode:
portgroups: portgroups:
- salt_manager - salt_manager
receiver:
portgroups:
- salt_manager
customhostgroup0: customhostgroup0:
portgroups: [] portgroups: []
customhostgroup1: customhostgroup1:
@@ -731,6 +753,14 @@ firewall:
- redis - redis
- elasticsearch_rest - elasticsearch_rest
- elasticsearch_node - elasticsearch_node
receiver:
portgroups:
- yum
- docker_registry
- influxdb
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
self: self:
portgroups: portgroups:
- syslog - syslog
@@ -806,6 +836,9 @@ firewall:
heavynode: heavynode:
portgroups: portgroups:
- salt_manager - salt_manager
receiver:
portgroups:
- salt_manager
customhostgroup0: customhostgroup0:
portgroups: [] portgroups: []
customhostgroup1: customhostgroup1:
@@ -1128,7 +1161,9 @@ firewall:
hostgroups: hostgroups:
sensor: sensor:
portgroups: portgroups:
- beats_5044
- beats_5644 - beats_5644
- elastic_agent_data
searchnode: searchnode:
portgroups: portgroups:
- redis - redis

View File

@@ -8,6 +8,7 @@ logstash:
receiver: receiver:
- receiver - receiver
heavynode: heavynode:
- manager
- search - search
searchnode: searchnode:
- search - search

View File

@@ -58,7 +58,7 @@ so-logstash:
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %} {% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet'] %} {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet', 'so-heavynode', 'so-receiver'] %}
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro - /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:/usr/share/logstash/elasticfleet-logstash.key:ro - /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:/usr/share/logstash/elasticfleet-logstash.key:ro
{% endif %} {% endif %}

View File

@@ -8,6 +8,7 @@
{% set LOGSTASH_MERGED = salt['pillar.get']('logstash', LOGSTASH_DEFAULTS.logstash, merge=True) %} {% set LOGSTASH_MERGED = salt['pillar.get']('logstash', LOGSTASH_DEFAULTS.logstash, merge=True) %}
{% set REDIS_NODES = [] %} {% set REDIS_NODES = [] %}
{# LOGSTASH_NODES is the same as ES_LOGSTASH_NODES from elasticsearch/config.map.jinja but heavynodes are present #}
{% set LOGSTASH_NODES = [] %} {% set LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %} {% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}

View File

@@ -133,6 +133,15 @@ function add_elasticsearch_to_minion() {
" " >> $PILLARFILE " " >> $PILLARFILE
} }
# Add Elastic Agent settings to the minion file
function add_elastic_agent_to_minion() {
printf '%s\n'\
"elasticagent:"\
" enabled: True"\
" " >> $PILLARFILE
}
# Add Elastic Fleet Server settings to the minion file # Add Elastic Fleet Server settings to the minion file
function add_fleet_to_minion() { function add_fleet_to_minion() {
@@ -518,6 +527,7 @@ function createIDH() {
function createHEAVYNODE() { function createHEAVYNODE() {
add_elasticsearch_to_minion add_elasticsearch_to_minion
add_elastic_agent_to_minion
add_logstash_to_minion add_logstash_to_minion
add_sensor_to_minion add_sensor_to_minion
add_strelka_to_minion add_strelka_to_minion
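createHEAVYNODE now also writes the elasticagent enable flag into the heavy node's minion pillar, which is what flips the elasticagent state from its disabled default. A sketch for confirming the block was written after setup; the file naming assumes the usual <hostname>_heavynode.sls minion pillar convention:

#!/bin/bash
# Confirm the elasticagent block exists in the heavy node minion pillar (sketch).
grep -A1 '^elasticagent:' /opt/so/saltstack/local/pillar/minions/*_heavynode.sls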

View File

@@ -7,6 +7,7 @@
. /usr/sbin/so-common . /usr/sbin/so-common
. /usr/sbin/so-image-common
UPDATE_DIR=/tmp/sogh/securityonion UPDATE_DIR=/tmp/sogh/securityonion
DEFAULT_SALT_DIR=/opt/so/saltstack/default DEFAULT_SALT_DIR=/opt/so/saltstack/default
@@ -178,7 +179,7 @@ update_registry() {
check_airgap() { check_airgap() {
# See if this is an airgap install # See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}') AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}')
if [[ "$AIRGAP" == "True" ]]; then if [[ "$AIRGAP" == "True" ]]; then
is_airgap=0 is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion UPDATE_DIR=/tmp/soagupdate/SecurityOnion
@@ -303,7 +304,7 @@ check_log_size_limit() {
check_os_updates() { check_os_updates() {
# Check to see if there are OS updates # Check to see if there are OS updates
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated." NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
OSUPDATES=$(yum -q list updates | wc -l) OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
if [[ "$OSUPDATES" -gt 0 ]]; then if [[ "$OSUPDATES" -gt 0 ]]; then
if [[ -z $UNATTENDED ]]; then if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES" echo "$NEEDUPDATES"
@@ -509,7 +510,7 @@ update_version() {
echo "Updating the Security Onion version file." echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion echo $NEWVERSION > /etc/soversion
echo $HOTFIXVERSION > /etc/sohotfix echo $HOTFIXVERSION > /etc/sohotfix
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global/soc_global.sls
} }
upgrade_check() { upgrade_check() {
@@ -583,22 +584,22 @@ upgrade_salt() {
verify_latest_update_script() { verify_latest_update_script() {
# Check to see if the update scripts match. If not, run the new one. # Check to see if the update scripts match. If not, run the new one.
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}') CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}') GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}') CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}') GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}') CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}') GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}') CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-firewall | awk '{print $1}') GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the soup script is up to date. Proceeding." echo "This version of the soup script is up to date. Proceeding."
else else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete." echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/ cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
echo "" echo ""
echo "The soup script has been modified. Please run soup again to continue the upgrade." echo "The soup script has been modified. Please run soup again to continue the upgrade."
@@ -746,7 +747,7 @@ main() {
stop_salt_master stop_salt_master
update_repo #update_repo
# Does salt need to be upgraded? If so, update it. # Does salt need to be upgraded? If so, update it.
if [[ $UPGRADESALT -eq 1 ]]; then if [[ $UPGRADESALT -eq 1 ]]; then
@@ -844,8 +845,8 @@ main() {
fi fi
fi fi
echo "Checking for local modifications." #echo "Checking for local modifications."
check_local_mods #check_local_mods
echo "Checking sudoers file." echo "Checking sudoers file."
check_sudoers check_sudoers

View File

@@ -1117,6 +1117,9 @@ soc:
- name: caseExcludeToggle - name: caseExcludeToggle
filter: 'NOT _index:"*:so-case*"' filter: 'NOT _index:"*:so-case*"'
enabled: true enabled: true
- name: socExcludeToggle
filter: 'NOT event.module:"soc"'
enabled: true
queries: queries:
- name: Default Query - name: Default Query
description: Show all events grouped by the observer host description: Show all events grouped by the observer host
@@ -1384,6 +1387,9 @@ soc:
- name: caseExcludeToggle - name: caseExcludeToggle
filter: 'NOT _index:"*:so-case*"' filter: 'NOT _index:"*:so-case*"'
enabled: true enabled: true
- name: socExcludeToggle
filter: 'NOT event.module:"soc"'
enabled: true
queries: queries:
- name: Overview - name: Overview
description: Overview of all events description: Overview of all events
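
The exclude toggles added above all follow the same name/filter/enabled shape; a hedged sketch of one more toggle in that format (the module name is a placeholder, not a real event.module value):

# Hypothetical example only; not part of the shipped soc configuration.
cat <<'EOF'
- name: exampleExcludeToggle
  filter: 'NOT event.module:"example_module"'
  enabled: true
EOF
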

View File

@@ -140,7 +140,7 @@ rediskeyperms:
- group: 939 - group: 939
{% endif %} {% endif %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet'] %} {% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
# Create cert for Elastic Fleet Host # Create cert for Elastic Fleet Host
etc_elasticfleet_key: etc_elasticfleet_key:

View File

@@ -209,6 +209,7 @@ base:
- suricata - suricata
- zeek - zeek
- elasticfleet.install_agent_grid - elasticfleet.install_agent_grid
- elasticagent
- docker_clean - docker_clean
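
With elasticagent added to the sensor highstate above, the state can also be applied on demand from the manager; a hedged example with an assumed minion ID:

# Hypothetical one-off run against a single sensor minion; the minion ID is
# an assumed example.
salt 'sensor01_sensor' state.apply elasticagent queue=True
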
'*_import and G@saltversion:{{saltversion}}': '*_import and G@saltversion:{{saltversion}}':

View File

@@ -0,0 +1 @@
{% set ROLE_GLOBALS = {} %}

View File

@@ -974,16 +974,16 @@ detect_os() {
} }
download_elastic_agent_artifacts() { download_elastic_agent_artifacts() {
agentArchive=/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz
if [[ $is_iso ]]; then agentMd5=/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5
logCmd "tar -xf /nsm/elastic-fleet/artifacts/beats/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/" beatsDir=/nsm/elastic-fleet/artifacts/beats/elastic-agent
else logCmd "mkdir -p $beatsDir"
logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/" if [[ ! -f "$agentArchive" ]]; then
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" "" "" retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz --output $agentArchive" "" ""
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5 --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" "" "" retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5 --output $agentMd5" "" ""
SOURCEHASH=$(md5sum /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz | awk '{ print $1 }') SOURCEHASH=$(md5sum $agentArchive | awk '{ print $1 }')
HASH=$(cat /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5) HASH=$(cat $agentMd5)
if [[ "$HASH" == "$SOURCEHASH" ]]; then if [[ "$HASH" == "$SOURCEHASH" ]]; then
info "Elastic Agent source hash is good." info "Elastic Agent source hash is good."
@@ -991,9 +991,8 @@ download_elastic_agent_artifacts() {
info "Unable to download the Elastic Agent source files." info "Unable to download the Elastic Agent source files."
fail_setup fail_setup
fi fi
logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
fi fi
logCmd "tar -xf $agentArchive -C $beatsDir"
} }
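
The refactor above boils down to: cache the tarball under /nsm, download and md5-verify it only when it is missing, then always extract into the beats directory. A condensed standalone sketch of that flow (the helper name and error handling are illustrative, not from the setup script):

# Hypothetical condensed version of the download-verify-extract flow above.
fetch_and_unpack() {
  local archive="$1" md5file="$2" dest="$3" url_base="$4"
  mkdir -p "$dest"
  if [[ ! -f "$archive" ]]; then
    curl --fail --retry 5 --retry-delay 15 -L "$url_base/$(basename "$archive")" --output "$archive"
    curl --fail --retry 5 --retry-delay 15 -L "$url_base/$(basename "$md5file")" --output "$md5file"
    if [[ "$(md5sum "$archive" | awk '{print $1}')" != "$(cat "$md5file")" ]]; then
      echo "Elastic Agent hash mismatch" >&2
      return 1
    fi
  fi
  tar -xf "$archive" -C "$dest"
}
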
installer_progress_loop() { installer_progress_loop() {
@@ -1150,45 +1149,7 @@ elasticsearch_pillar() {
" query:"\ " query:"\
" bool:"\ " bool:"\
" max_clause_count: 3500"\ " max_clause_count: 3500"\
" index_settings:"\ > $elasticsearch_pillar_file " index_settings: {}" > $elasticsearch_pillar_file
for INDEX in aws azure barracuda beats bluecoat cef checkpoint cisco cyberark cylance elasticsearch endgame f5 firewall fortinet gcp google_workspace imperva infoblox juniper kibana logstash microsoft misp netflow netscout o365 okta osquery proofpoint radware redis snort snyk sonicwall sophos strelka syslog tomcat zeek zscaler
do
printf '%s\n'\
" so-$INDEX:"\
" warm: 7"\
" close: 30"\
" delete: 365"\
" index_sorting: False"\
" index_template:"\
" template:"\
" settings:"\
" index:"\
" mapping:"\
" total_fields:"\
" limit: 5000"\
" refresh_interval: 30s"\
" number_of_shards: 1"\
" number_of_replicas: 0" >> $elasticsearch_pillar_file
done
for INDEX in import
do
printf '%s\n'\
" so-$INDEX:"\
" warm: 7"\
" close: 73000"\
" delete: 73001"\
" index_sorting: False"\
" index_template:"\
" template:"\
" settings:"\
" index:"\
" mapping:"\
" total_fields:"\
" limit: 5000"\
" refresh_interval: 30s"\
" number_of_shards: 1"\
" number_of_replicas: 0" >> $elasticsearch_pillar_file
done
} }
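
After this simplification the generated pillar no longer carries per-index blocks; a hedged sketch of the tail of the file as now written (nesting inferred from the printf strings above, parent keys not shown):

# Hypothetical: approximate tail of the generated elasticsearch pillar after
# the change above; indentation and parent keys are assumed from context.
cat <<'EOF'
    query:
      bool:
        max_clause_count: 3500
    index_settings: {}
EOF
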
es_heapsize() { es_heapsize() {

View File

@@ -489,9 +489,13 @@ if ! [[ -f $install_opt_file ]]; then
check_requirements "heavynode" check_requirements "heavynode"
calculate_useable_cores calculate_useable_cores
networking_needful networking_needful
check_network_manager_conf
set_network_dev_status_list
collect_mngr_hostname collect_mngr_hostname
add_mngr_ip_to_hosts add_mngr_ip_to_hosts
check_manager_connection check_manager_connection
detect_cloud
whiptail_sensor_nics
set_minion_info set_minion_info
whiptail_end_settings whiptail_end_settings

View File

@@ -36,7 +36,7 @@ log_has_errors() {
# Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target # Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target
# may be requested by dependency only (it is configured to refuse manual start/stop). # may be requested by dependency only (it is configured to refuse manual start/stop).
grep -E "FAILED|Failed|failed|ERROR|Result: False" "$setup_log" | \ grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \
grep -vE "The Salt Master has cached the public key for this node" | \ grep -vE "The Salt Master has cached the public key for this node" | \
grep -vE "Minion failed to authenticate with the master" | \ grep -vE "Minion failed to authenticate with the master" | \
grep -vE "Failed to connect to ::1" | \ grep -vE "Failed to connect to ::1" | \