Compare commits


1 Commit

Author: m0duspwnens
SHA1: 50ab63162a
Message: users
Date: 2024-01-17 12:51:15 -05:00
248 changed files with 4812 additions and 252717 deletions

View File

@@ -536,10 +536,11 @@ secretGroup = 4
[allowlist]
description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''']
paths = [
'''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
'''(go.mod|go.sum)$''',
'''salt/nginx/files/enterprise-attack.json'''
]
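
For reference, an allowlist change like the one above can be sanity-checked locally with the gitleaks CLI before committing. A minimal sketch, assuming gitleaks v8 is installed and the command is run from the repository root:

```
# scan the working tree using the repo's own gitleaks.toml (gitleaks v8 flags)
gitleaks detect --source . --config gitleaks.toml --verbose
```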

View File

@@ -1,190 +0,0 @@
body:
- type: markdown
attributes:
value: |
⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
- type: dropdown
attributes:
label: Version
description: Which version of Security Onion 2.4.x are you asking about?
options:
-
- 2.4 Pre-release (Beta, Release Candidate)
- 2.4.10
- 2.4.20
- 2.4.30
- 2.4.40
- 2.4.50
- 2.4.60
- 2.4.70
- 2.4.80
- 2.4.90
- 2.4.100
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Method
description: How did you install Security Onion?
options:
-
- Security Onion ISO image
- Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc.
- Network installation on Ubuntu
- Network installation on Debian
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Description
description: >
Is this discussion about installation, configuration, upgrading, or other?
options:
-
- installation
- configuration
- upgrading
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Type
description: >
When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
options:
-
- Import
- Eval
- Standalone
- Distributed
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Location
description: >
Is this deployment in the cloud, on-prem with Internet access, or airgap?
options:
-
- cloud
- on-prem with Internet access
- airgap
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Hardware Specs
description: >
Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html?
options:
-
- Meets minimum requirements
- Exceeds minimum requirements
- Does not meet minimum requirements
- other (please provide detail below)
validations:
required: true
- type: input
attributes:
label: CPU
description: How many CPU cores do you have?
validations:
required: true
- type: input
attributes:
label: RAM
description: How much RAM do you have?
validations:
required: true
- type: input
attributes:
label: Storage for /
description: How much storage do you have for the / partition?
validations:
required: true
- type: input
attributes:
label: Storage for /nsm
description: How much storage do you have for the /nsm partition?
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Collection
description: >
Are you collecting network traffic from a tap or span port?
options:
-
- tap
- span port
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Speeds
description: >
How much network traffic are you monitoring?
options:
-
- Less than 1Gbps
- 1Gbps to 10Gbps
- more than 10Gbps
validations:
required: true
- type: dropdown
attributes:
label: Status
description: >
Does SOC Grid show all services on all nodes as running OK?
options:
-
- Yes, all services on all nodes are running OK
- No, one or more services are failed (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Salt Status
description: >
Do you get any failures when you run "sudo salt-call state.highstate"?
options:
-
- Yes, there are salt failures (please provide detail below)
- No, there are no failures
validations:
required: true
- type: dropdown
attributes:
label: Logs
description: >
Are there any additional clues in /opt/so/log/?
options:
-
- Yes, there are additional clues in /opt/so/log/ (please provide detail below)
- No, there are no additional clues
validations:
required: true
- type: textarea
attributes:
label: Detail
description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
placeholder: |-
STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
validations:
required: true
- type: checkboxes
attributes:
label: Guidelines
options:
- label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
required: true

View File

@@ -1,33 +0,0 @@
name: 'Close Threads'
on:
schedule:
- cron: '50 1 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
close-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: -1
days-before-issue-close: 60
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
days-before-pr-stale: 45
days-before-pr-close: 60
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."

View File

@@ -1,26 +0,0 @@
name: 'Lock Threads'
on:
schedule:
- cron: '50 2 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
lock-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
steps:
- uses: jertel/lock-threads@main
with:
include-discussion-currently-open: true
discussion-inactive-days: 90
issue-inactive-days: 30
pr-inactive-days: 30

View File

@@ -1,17 +1,17 @@
### 2.4.70-20240529 ISO image released on 2024/05/29
### 2.4.30-20231228 ISO image released on 2024/01/02
### Download and Verify
2.4.70-20240529 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
2.4.30-20231228 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
MD5: 8FCCF31C2470D1ABA380AF196B611DEC
SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B
SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
MD5: DBD47645CD6FA8358C51D8753046FB54
SHA1: 2494091065434ACB028F71444A5D16E8F8A11EDF
SHA256: 3345AE1DC58AC7F29D82E60D9A36CDF8DE19B7DFF999D8C4F89C7BD36AEE7F1D
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso
gpg --verify securityonion-2.4.30-20231228.iso.sig securityonion-2.4.30-20231228.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013
gpg: Signature made Thu 28 Dec 2023 10:08:31 AM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
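
Beyond the GPG signature check, the download can also be compared against the published hashes listed above; a small sketch using the 2.4.30 filename from this hunk (swap in the 2.4.70 filename if that is the image being verified):

```
# compute the SHA256 of the downloaded ISO and compare it to the value in the notes above
sha256sum securityonion-2.4.30-20231228.iso
```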

View File

@@ -8,22 +8,19 @@ Alerts
![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
Dashboards
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png)
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png)
Hunt
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png)
Detections
![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png)
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png)
PCAP
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png)
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png)
Grid
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png)
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png)
Config
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png)
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png)
### Release Notes

View File

@@ -1 +1 @@
2.4.70
2.4.40

View File

@@ -41,8 +41,7 @@ file_roots:
base:
- /opt/so/saltstack/local/salt
- /opt/so/saltstack/default/salt
- /nsm/elastic-fleet/artifacts
- /opt/so/rules/nids
# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.

View File

@@ -16,6 +16,7 @@ base:
- sensoroni.adv_sensoroni
- telegraf.soc_telegraf
- telegraf.adv_telegraf
- users
'* and not *_desktop':
- firewall.soc_firewall
@@ -43,6 +44,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- kratos.soc_kratos
@@ -59,9 +62,10 @@ base:
- elastalert.adv_elastalert
- backup.soc_backup
- backup.adv_backup
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
'*_sensor':
- healthcheck.sensor
@@ -77,8 +81,6 @@ base:
- suricata.adv_suricata
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_eval':
- secrets
@@ -104,6 +106,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -159,6 +163,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -175,7 +181,6 @@ base:
- suricata.adv_suricata
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
'*_heavynode':
- elasticsearch.auth
@@ -218,8 +223,6 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_receiver':
- logstash.nodes
@@ -254,6 +257,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- backup.soc_backup

pillar/users/init.sls (new file, +2 lines)
View File

@@ -0,0 +1,2 @@
# users pillar goes in /opt/so/saltstack/local/pillar/users/init.sls
# the users directory may need to be created under /opt/so/saltstack/local/pillar
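
Following the comment above and the example file that comes next, a hedged sketch of how the local users pillar might be created on the manager (the username and node_access value are illustrative):

```
# create the local users pillar directory and a minimal init.sls (illustrative values)
sudo mkdir -p /opt/so/saltstack/local/pillar/users
sudo tee /opt/so/saltstack/local/pillar/users/init.sls > /dev/null <<'EOF'
users:
  jdoe:
    status: present
    node_access:
      - standalone
EOF
sudo chown -R socore:socore /opt/so/saltstack/local/pillar/users
# apply with a highstate (so-checkin, later in this diff, wraps the same call)
sudo salt-call state.highstate queue=True
```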

View File

@@ -0,0 +1,18 @@
users:
sclapton:
# required fields
status: present
# node_access determines which node types the user can access.
# this can either be by grains.role or by final part of the minion id after the _
node_access:
- standalone
- searchnode
# optional fields
fullname: Stevie Claptoon
uid: 1001
gid: 1001
homephone: does not have a phone
groups:
- mygroup1
- mygroup2
- wheel # give sudo access

pillar/users/pillar.usage (new file, +20 lines)
View File

@@ -0,0 +1,20 @@
users:
sclapton:
# required fields
status: <present | absent>
# node_access determines which node types the user can access.
# this can either be by grains.role or by final part of the minion id after the _
node_access:
- standalone
- searchnode
# optional fields
fullname: <string>
uid: <integer>
gid: <integer>
roomnumber: <string>
workphone: <string>
homephone: <string>
groups:
- <string>
- <string>
- wheel # give sudo access
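
Once a users pillar is in place, standard salt-call commands can confirm what a given node actually renders; a small sketch (output depends on the local pillar):

```
# refresh and inspect the rendered users pillar on the node being checked
sudo salt-call saltutil.refresh_pillar
sudo salt-call pillar.get users
```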

View File

@@ -34,6 +34,7 @@
'suricata',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean'
],
@@ -65,7 +66,6 @@
'registry',
'manager',
'nginx',
'strelka.manager',
'soc',
'kratos',
'influxdb',
@@ -92,7 +92,6 @@
'nginx',
'telegraf',
'influxdb',
'strelka.manager',
'soc',
'kratos',
'elasticfleet',
@@ -102,8 +101,8 @@
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig'
'soctopus',
'docker_clean'
],
'so-managersearch': [
'salt.master',
@@ -113,7 +112,6 @@
'nginx',
'telegraf',
'influxdb',
'strelka.manager',
'soc',
'kratos',
'elastic-fleet-package-registry',
@@ -124,8 +122,8 @@
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig'
'soctopus',
'docker_clean'
],
'so-searchnode': [
'ssl',
@@ -133,8 +131,7 @@
'telegraf',
'firewall',
'schedule',
'docker_clean',
'stig'
'docker_clean'
],
'so-standalone': [
'salt.master',
@@ -157,9 +154,9 @@
'healthcheck',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean',
'stig'
'docker_clean'
],
'so-sensor': [
'ssl',
@@ -171,15 +168,13 @@
'healthcheck',
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'docker_clean'
],
'so-fleet': [
'ssl',
'telegraf',
'firewall',
'logstash',
'nginx',
'healthcheck',
'schedule',
'elasticfleet',
@@ -199,6 +194,10 @@
],
}, grain='role') %}
{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
{% do allowed_states.append('mysql') %}
{% endif %}
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('zeek') %}
{%- endif %}
@@ -224,6 +223,10 @@
{% do allowed_states.append('elastalert') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('playbook') %}
{% endif %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}

View File

@@ -1,10 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.pcap_engine == "TRANSITION" %}
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
{% else %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %}
{% endif %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %}
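
Since the template above merges any bpf pillar values over bpf/defaults.yaml before taking the pcap key, here is a hedged sketch of overriding the PCAP filter from the local minion pillar and checking the merged result (the pillar path and filter are assumptions; in practice this setting is normally edited through the SOC configuration interface described by the annotation file that follows):

```
# illustrative override: append a pcap BPF to the minion's local pillar file
# (if that file already has a bpf: section, extend it there instead of appending)
cat <<'EOF' | sudo tee -a /opt/so/saltstack/local/pillar/minions/<minion_id>.sls
bpf:
  pcap:
    - not host 192.0.2.10
EOF
# verify the value the template will see after merging with bpf/defaults.yaml
sudo salt-call pillar.get bpf
```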

View File

@@ -1,6 +1,6 @@
bpf:
pcap:
description: List of BPF filters to apply to Stenographer.
description: List of BPF filters to apply to PCAP.
multiline: True
forcedType: "[]string"
helpLink: bpf.html

View File

@@ -4,6 +4,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- common.soup_scripts
- common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state
@@ -133,18 +134,6 @@ common_sbin_jinja:
- file_mode: 755
- template: jinja
{% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and were distributed to non-managers
# these two states remove the scripts from non-manager nodes
remove_soup:
file.absent:
- name: /usr/sbin/soup
remove_so-firewall:
file.absent:
- name: /usr/sbin/so-firewall
{% endif %}
so-status_script:
file.managed:
- name: /usr/sbin/so-status

View File

@@ -1,104 +1,23 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Sync some Utilities
soup_scripts:
file.recurse:
- name: /usr/sbin
- user: root
- group: root
- file_mode: 755
- source: salt://common/tools/sbin
- include_pat:
- so-common
- so-image-common
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}
remove_common_soup:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
remove_common_so-firewall:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
# This section is used to put the scripts in place in the Salt file system
# in case a state run tries to overwrite what we do in the next section.
copy_so-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin:
file.copy:
- name: /usr/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_sbin:
file.copy:
- name: /usr/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_sbin:
file.copy:
- name: /usr/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_sbin:
file.copy:
- name: /usr/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_sbin:
file.copy:
- name: /usr/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
cmd.run:
- name: curl -s -f -o /opt/so/saltstack/default/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}
soup_manager_scripts:
file.recurse:
- name: /usr/sbin
- user: root
- group: root
- file_mode: 755
- source: salt://manager/tools/sbin
- include_pat:
- so-firewall
- so-repo-sync
- soup

View File

@@ -5,13 +5,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
cat << EOF
so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
https://docs.securityonion.net/en/2.4/salt.html
EOF
salt-call state.highstate -l info queue=True
salt-call state.highstate -l info

View File

@@ -179,21 +179,6 @@ copy_new_files() {
cd /tmp
}
create_local_directories() {
echo "Creating local pillar and salt directories if needed"
PILLARSALTDIR=$1
local_salt_dir="/opt/so/saltstack/local"
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -pv $local_salt_dir$suffixdir
fi
done
chown -R socore:socore $local_salt_dir/$i
done
}
disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
@@ -263,14 +248,6 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -352,7 +329,7 @@ lookup_salt_value() {
local=""
fi
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}
lookup_pillar() {
@@ -389,13 +366,6 @@ is_feature_enabled() {
return 1
}
read_feat() {
if [ -f /opt/so/log/sostatus/lks_enabled ]; then
lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}')
echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)"
fi
}
require_manager() {
if is_manager_node; then
echo "This is a manager, so we can proceed."
@@ -589,15 +559,6 @@ status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}
sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() {
local action=$1
local echo_action=$1

View File

@@ -8,7 +8,6 @@
import sys
import subprocess
import os
import json
sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/')
import salt.config
@@ -37,67 +36,17 @@ def check_needs_restarted():
with open(outfile, 'w') as f:
f.write(val)
def check_for_fps():
feat = 'fps'
feat_full = feat.replace('ps', 'ips')
fps = 0
try:
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0:
fps = 1
except FileNotFoundError:
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
try:
with open(fn, 'r') as f:
contents = f.read()
if '1' in contents:
fps = 1
except:
# Unknown, so assume 0
fps = 0
with open('/opt/so/log/sostatus/fps_enabled', 'w') as f:
f.write(str(fps))
def check_for_lks():
feat = 'Lks'
feat_full = feat.replace('ks', 'uks')
lks = 0
result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE)
data = json.loads(result.stdout)
for device in data['blockdevices']:
if 'children' in device:
for gc in device['children']:
if 'children' in gc:
try:
arg = 'is' + feat_full
result = subprocess.run(['cryptsetup', arg, gc['name']], stdout=subprocess.PIPE)
if result.returncode == 0:
lks = 1
except FileNotFoundError:
for ggc in gc['children']:
if 'crypt' in ggc['type']:
lks = 1
if lks:
break
with open('/opt/so/log/sostatus/lks_enabled', 'w') as f:
f.write(str(lks))
def fail(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def main():
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0":
fail("This program must be run as root")
# Ensure that umask is 0022 so that files created by this script have rw-r--r-- permissions
org_umask = os.umask(0o022)
check_needs_restarted()
check_for_fps()
check_for_lks()
# Restore umask to whatever value was set before this script was run. STIG hardening sets it to 0077 (files created rw-------).
os.umask(org_umask)
if __name__ == "__main__":
main()

View File

@@ -53,10 +53,13 @@ container_list() {
"so-kibana"
"so-kratos"
"so-logstash"
"so-mysql"
"so-nginx"
"so-pcaptools"
"so-playbook"
"so-redis"
"so-soc"
"so-soctopus"
"so-steno"
"so-strelka-backend"
"so-strelka-filestream"

View File

@@ -49,6 +49,10 @@ if [ "$CONTINUE" == "y" ]; then
sed -i "s|$OLD_IP|$NEW_IP|g" $file
done
echo "Granting MySQL root user permissions on $NEW_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
echo "Removing MySQL root user from $OLD_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True

View File

@@ -122,7 +122,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -155,11 +154,15 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
@@ -198,13 +201,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
fi
RESULT=0
@@ -213,9 +210,7 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging
exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-playbook # ignore due to several playbook known issues
for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -233,14 +228,10 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues
exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
exclude_log "curator.log" # ignore since Curator has been removed
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"

View File

@@ -1,98 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
set -e
# This script is intended to be used in case the ISO install did not properly set up TPM decryption for LUKS partitions at boot.
if [ -z $NOROOT ]; then
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
fi
ENROLL_TPM=N
while [[ $# -gt 0 ]]; do
case $1 in
--enroll-tpm)
ENROLL_TPM=Y
;;
*)
echo "Usage: $0 [options]"
echo ""
echo "where options are:"
echo " --enroll-tpm for when TPM enrollment was not selected during ISO install."
echo ""
exit 1
;;
esac
shift
done
check_for_tpm() {
echo -n "Checking for TPM: "
if [ -d /sys/class/tpm/tpm0 ]; then
echo -e "tpm0 found."
TPM="yes"
# Check if TPM is using sha1 or sha256
if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then
echo -e "TPM is using sha1.\n"
TPM_PCR="sha1"
elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then
echo -e "TPM is using sha256.\n"
TPM_PCR="sha256"
fi
else
echo -e "No TPM found.\n"
exit 1
fi
}
check_for_luks_partitions() {
echo "Checking for LUKS partitions"
for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do
echo "Found LUKS partition: $part"
LUKS_PARTITIONS+=("$part")
done
if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then
echo -e "No LUKS partitions found.\n"
exit 1
fi
echo ""
}
enroll_tpm_in_luks() {
read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE
echo ""
for part in "${LUKS_PARTITIONS[@]}"; do
echo "Enrolling TPM for LUKS device: /dev/$part"
if [ "$TPM_PCR" == "sha1" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
elif [ "$TPM_PCR" == "sha256" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
fi
done
}
regenerate_tpm_enrollment_token() {
for part in "${LUKS_PARTITIONS[@]}"; do
clevis luks regen -d /dev/$part -s 1 -q
done
}
check_for_tpm
check_for_luks_partitions
if [[ $ENROLL_TPM == "Y" ]]; then
enroll_tpm_in_luks
else
regenerate_tpm_enrollment_token
fi
echo "Running dracut"
dracut -fv
echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n"

View File

@@ -89,7 +89,6 @@ function suricata() {
-v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \
-v "$PCAP:/input.pcap:ro" \
-v /dev/null:/nsm/suripcap:rw \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
@@ -248,7 +247,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
status "Import complete!"
status

View File

@@ -334,7 +334,6 @@ desktop_packages:
- pulseaudio-libs
- pulseaudio-libs-glib2
- pulseaudio-utils
- putty
- sane-airscan
- sane-backends
- sane-backends-drivers-cameras

View File

@@ -67,6 +67,13 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx':
final_octet: 31
port_bindings:
@@ -77,10 +84,10 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx-fleet-node':
final_octet: 31
'so-playbook':
final_octet: 32
port_bindings:
- 8443:8443
- 0.0.0.0:3000:3000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
@@ -104,6 +111,13 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
@@ -180,8 +194,6 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits:
- memlock=524288000
'so-zeek':
final_octet: 99
custom_bind_mounts: []

View File

@@ -46,11 +46,13 @@ docker:
so-kibana: *dockerOptions
so-kratos: *dockerOptions
so-logstash: *dockerOptions
so-mysql: *dockerOptions
so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions
so-playbook: *dockerOptions
so-redis: *dockerOptions
so-sensoroni: *dockerOptions
so-soc: *dockerOptions
so-soctopus: *dockerOptions
so-strelka-backend: *dockerOptions
so-strelka-filestream: *dockerOptions
so-strelka-frontend: *dockerOptions
@@ -63,41 +65,5 @@ docker:
so-elastic-agent: *dockerOptions
so-telegraf: *dockerOptions
so-steno: *dockerOptions
so-suricata:
final_octet:
description: Last octet of the container IP address.
helpLink: docker.html
readonly: True
advanced: True
global: True
port_bindings:
description: List of port bindings for the container.
helpLink: docker.html
advanced: True
multiline: True
forcedType: "[]string"
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
ulimits:
description: Ulimits for the container, in bytes.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-suricata: *dockerOptions
so-zeek: *dockerOptions

View File

@@ -82,36 +82,6 @@ elastasomodulesync:
- group: 933
- makedirs: True
elastacustomdir:
file.directory:
- name: /opt/so/conf/elastalert/custom
- user: 933
- group: 933
- makedirs: True
elastacustomsync:
file.recurse:
- name: /opt/so/conf/elastalert/custom
- source: salt://elastalert/files/custom
- user: 933
- group: 933
- makedirs: True
- file_mode: 660
- show_changes: False
elastapredefinedsync:
file.recurse:
- name: /opt/so/conf/elastalert/predefined
- source: salt://elastalert/files/predefined
- user: 933
- group: 933
- makedirs: True
- template: jinja
- file_mode: 660
- context:
elastalert: {{ ELASTALERTMERGED }}
- show_changes: False
elastaconf:
file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml

View File

@@ -1,6 +1,5 @@
elastalert:
enabled: False
alerter_parameters: ""
config:
rules_folder: /opt/elastalert/rules/
scan_subdirectories: true

View File

@@ -30,8 +30,6 @@ so-elastalert:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
- /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}

View File

@@ -1 +0,0 @@
THIS IS A PLACEHOLDER FILE

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PlaybookESAlerter(Alerter):
"""
Use matched data to create alerts in elasticsearch
"""
required_options = set(['play_title','play_url','sigma_level'])
def alert(self, matches):
for match in matches:
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'PlaybookESAlerter'}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class SecurityOnionESAlerter(Alerter):
"""
Use matched data to create alerts in Elasticsearch.
"""
required_options = set(['detection_title', 'sigma_level'])
optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']
def alert(self, matches):
for match in matches:
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
# Start building the rule dict
rule_info = {
"name": self.rule['detection_title'],
"uuid": self.rule['detection_public_id']
}
# Add optional fields if they are present in the rule
for field in self.optional_fields:
rule_key = field.split('_')[-1] # Assumes field format "sigma_<key>"
if field in self.rule:
rule_info[rule_key] = self.rule[field]
# Construct the payload with the conditional rule_info
payload = {
"tags": "alert",
"rule": rule_info,
"event": {
"severity": self.rule['event.severity'],
"module": self.rule['event.module'],
"dataset": self.rule['event.dataset'],
"severity_label": self.rule['sigma_level']
},
"sigma_level": self.rule['sigma_level'],
"event_data": match,
"@timestamp": timestamp
}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'SecurityOnionESAlerter'}

View File

@@ -1,6 +0,0 @@
{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %}
user: {{ elastalert.jira_user }}
password: {{ elastalert.jira_pass }}
{% else %}
apikey: {{ elastalert.get('jira_api_key', '') }}
{% endif %}

View File

@@ -1,2 +0,0 @@
user: {{ elastalert.get('smtp_user', '') }}
password: {{ elastalert.get('smtp_pass', '') }}

View File

@@ -13,19 +13,3 @@
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
{% if 'ntf' in salt['pillar.get']('features', []) %}
{% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %}
{% if params != None and params | length > 0 %}
{% do ELASTALERTMERGED.config.update(params) %}
{% endif %}
{% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %}
{% endif %}
{% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %}
{% endif %}
{% endif %}

View File

@@ -2,99 +2,6 @@ elastalert:
enabled:
description: You can enable or disable Elastalert.
helpLink: elastalert.html
alerter_parameters:
title: Alerter Parameters
description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: elastalert.html
forcedType: string
jira_api_key:
title: Jira API Key
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_pass:
title: Jira Password
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_user:
title: Jira Username
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
smtp_pass:
title: SMTP Password
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
smtp_user:
title: SMTP Username
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
files:
custom:
alertmanager_ca__crt:
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
gelf_ca__crt:
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post2_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
ms_teams_ca__crt:
description: Optional custom Certificate Authority for connecting to Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
pagerduty_ca__crt:
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
rocket_chat_ca__crt:
description: Optional custom Certificate Authority for connecting to a Rocket.Chat server. To utilize this custom file, the rocket_chat_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__crt:
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__key:
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
slack_ca__crt:
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
config:
disable_rules_on_error:
description: Disable rules on failure.

View File

@@ -37,7 +37,6 @@ elasticfleet:
- azure
- barracuda
- carbonblack_edr
- cef
- checkpoint
- cisco_asa
- cisco_duo
@@ -46,8 +45,6 @@ elasticfleet:
- cisco_ise
- cisco_meraki
- cisco_umbrella
- citrix_adc
- citrix_waf
- cloudflare
- crowdstrike
- darktrace
@@ -66,7 +63,6 @@ elasticfleet:
- http_endpoint
- httpjson
- iis
- journald
- juniper
- juniper_srx
- kafka_log
@@ -79,7 +75,6 @@ elasticfleet:
- mimecast
- mysql
- netflow
- nginx
- o365
- okta
- osquery_manager
@@ -108,7 +103,6 @@ elasticfleet:
- udp
- vsphere
- windows
- winlog
- zscaler_zia
- zscaler_zpa
- 1password
@@ -119,8 +113,3 @@ elasticfleet:
base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m
limit: 100
kismet:
base_url: http://localhost:2501
poll_interval: 1m
api_key:
enabled_nodes: []

View File

@@ -17,11 +17,6 @@ include:
- elasticfleet.sostatus
- ssl
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
@@ -38,26 +33,12 @@ so-elastic-fleet-auto-configure-server-urls:
- retry: True
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
# Automatically update Fleet Server Elasticsearch URLs
{% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: True
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: True
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats
- source: salt://beats
{% endif %}
{% if SERVICETOKEN != '' %}

View File

@@ -1,36 +0,0 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% raw %}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "kismet-logs",
"namespace": "so",
"description": "Kismet Logs",
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"data_stream.dataset": "kismet",
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
"request_method": "GET",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
"request_redirect_headers_ban_list": [],
"oauth_scopes": [],
"processors": "",
"tags": [],
"pipeline": "kismet.common"
}
}
}
}
},
"force": true
}
{% endraw %}

View File

@@ -1,29 +0,0 @@
{
"package": {
"name": "winlog",
"version": ""
},
"name": "windows-defender",
"namespace": "default",
"description": "Windows Defender - Operational logs",
"policy_id": "endpoints-initial",
"inputs": {
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",
"data_stream.dataset": "winlog.winlog",
"preserve_original_event": false,
"providers": [],
"ignore_older": "72h",
"language": 0,
"tags": [] }
}
}
}
},
"force": true
}

View File

@@ -1,34 +0,0 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/rita/beacons.csv",
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "rita",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
}
}
}
}
}
}

View File

@@ -1,35 +0,0 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"namespace": "so",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"tags": [
"so-soc"
],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
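The decode_json_fields and rename processors above lift selected keys from the SOC JSON log line into ECS fields. As a rough illustration of the same mapping with jq, using a hypothetical log entry that only contains the fields named in the rename list:

# Hypothetical SOC detections log line; field names are taken from the rename processor above.
echo '{"fields":{"sourceIp":"10.0.0.5","status":200,"method":"GET","path":"/detections"},"message":"request","level":"info"}' |
  jq '{"source.ip": .fields.sourceIp,
       "http.response.status_code": .fields.status,
       "http.request.method": .fields.method,
       "url.path": .fields.path,
       "event.action": .message,
       "log.level": .level}'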

View File

@@ -16,9 +16,6 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -28,9 +25,6 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -16,9 +16,6 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -28,9 +25,6 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -79,29 +79,3 @@ elasticfleet:
helpLink: elastic-fleet.html
advanced: True
forcedType: int
kismet:
base_url:
description: Base URL for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for wireless device data from Kismet. The integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
api_key:
description: API key for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
enabled_nodes:
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"

View File

@@ -46,7 +46,7 @@ do
done
printf "\n### Stripping out unused components"
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -delete
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
printf "\n### Tarring everything up again"
for OS in "${OSARCH[@]}"
@@ -72,5 +72,5 @@ do
printf "\n### $GOOS/$GOARCH Installer Generated...\n"
done
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
rm -rf /nsm/elastic-agent-workspace
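The find -regex call above strips unneeded agent components before the archives are rebuilt. A dry-run variant, assuming the same workspace layout, lists what would be removed instead of deleting it; switching -print back to -delete reproduces the script's behavior.

# List the components the build script would strip, without deleting anything.
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components \
  -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -print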

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,90 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
# Only run on Managers
if ! is_manager_node; then
printf "Not a Manager Node... Exiting"
exit 0
fi
# Function to check if an array contains a value
array_contains () {
local array="$1[@]"
local seeking=$2
local in=1
for element in "${!array}"; do
if [[ $element == "$seeking" ]]; then
in=0
break
fi
done
return $in
}
# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'
# Initialize an array for new hosts from Fleet Nodes
declare -a NEW_LIST=()
# Query for Fleet Nodes & add them to the list (Hostname)
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
for NODE in "${FLEETNODES[@]}"; do
URL="http://$NODE:8443/artifacts/"
NAME="FleetServer_$NODE"
NEW_LIST+=("$URL=$NAME")
done
fi
# Create an array for expected hosts and their names
declare -A expected_urls=(
["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
)
# Merge NEW_LIST into expected_urls
for entry in "${NEW_LIST[@]}"; do
# Extract URL and Name from each entry
IFS='=' read -r URL NAME <<< "$entry"
# Add to expected_urls, automatically handling URL as key and NAME as value
expected_urls["$URL"]="$NAME"
done
# Fetch the current hosts from the API
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)
# Convert current hosts to an array
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"
# Flag to track if any host was added
any_url_added=0
# Check each expected host
for host in "${!expected_urls[@]}"; do
array_contains current_urls_array "$host" || {
echo "$host (${expected_urls[$host]}) is missing. Adding it..."
# Prepare the JSON payload
JSON_STRING=$( jq -n \
--arg NAME "${expected_urls[$host]}" \
--arg URL "$host" \
'{"name":$NAME,"host":$URL}' )
# Create the missing host
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Flag that an artifact URL was added
any_url_added=1
}
done
if [[ $any_url_added -eq 0 ]]; then
echo "All expected artifact URLs are present. No updates needed."
fi
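The script above reconciles Fleet's agent download sources with the expected artifact URLs through the Kibana Fleet API. To inspect the current state by hand on a manager, something like the following should work, reusing the same curl config the script itself relies on:

# Print each configured agent download source as "host name".
curl -s -K /opt/so/conf/elasticsearch/curl.config \
  'http://localhost:5601/api/fleet/agent_download_sources' |
  jq -r '.items[] | "\(.host) \(.name)"'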

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -118,19 +118,6 @@ esingestconf:
- user: 930
- group: 939
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
file.managed:
- name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ config }}
- template: jinja
- onchanges_in:
- file: so-pipelines-reload
{% endfor %}
eslog4jfile:
file.managed:
- name: /opt/so/conf/elasticsearch/log4j2.properties

File diff suppressed because it is too large

View File

@@ -200,15 +200,9 @@ so-elasticsearch-roles-load:
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-elasticsearch-indices-delete:
cron.{{ap}}:
cron.present:
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
- identifier: so-elasticsearch-indices-delete
- user: root
@@ -217,8 +211,7 @@ so-elasticsearch-indices-delete:
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
{% endif %}
{% endif %}
{% else %}

View File

@@ -57,11 +57,10 @@
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}

View File

@@ -68,7 +68,7 @@
"field": "_security",
"ignore_missing": true
}
},
},
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
@@ -80,10 +80,9 @@
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [

View File

@@ -1,10 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "network.wireless.bssid"
}
}
]
}

View File

@@ -1,50 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked",
"target_field": "network.wireless.ssid_cloaked",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid",
"target_field": "network.wireless.ssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null"
}
},
{
"set": {
"field": "network.wireless.ssid",
"value": "Hidden",
"if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc",
"target_field": "network.wireless.channel_utilization",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}

View File

@@ -1,16 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
}
]
}

View File

@@ -1,29 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.last_connected_bssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_client_map",
"processor": {
"append": {
"field": "network.wireless.known_connected_bssid",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null"
}
}
]
}

View File

@@ -1,159 +0,0 @@
{
"processors": [
{
"json": {
"field": "message",
"target_field": "message2"
}
},
{
"date": {
"field": "message2.kismet_device_base_mod_time",
"formats": [
"epoch_second"
],
"target_field": "@timestamp"
}
},
{
"set": {
"field": "event.category",
"value": "network"
}
},
{
"dissect": {
"field": "message2.kismet_device_base_type",
"pattern": "%{wifi} %{device_type}"
}
},
{
"lowercase": {
"field": "device_type"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.{{device_type}}"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.wds_ap",
"if": "ctx?.device_type == 'wds ap'"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.ad_hoc",
"if": "ctx?.device_type == 'ad-hoc'"
}
},
{
"set": {
"field": "event.module",
"value": "kismet"
}
},
{
"rename": {
"field": "message2.kismet_device_base_packets_tx_total",
"target_field": "source.packets"
}
},
{
"rename": {
"field": "message2.kismet_device_base_num_alerts",
"target_field": "kismet.alerts.count"
}
},
{
"rename": {
"field": "message2.kismet_device_base_channel",
"target_field": "network.wireless.channel",
"if": "ctx?.message2?.kismet_device_base_channel != ''"
}
},
{
"rename": {
"field": "message2.kismet_device_base_frequency",
"target_field": "network.wireless.frequency",
"if": "ctx?.message2?.kismet_device_base_frequency != 0"
}
},
{
"rename": {
"field": "message2.kismet_device_base_last_time",
"target_field": "kismet.last_seen"
}
},
{
"date": {
"field": "kismet.last_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.last_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_first_time",
"target_field": "kismet.first_seen"
}
},
{
"date": {
"field": "kismet.first_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.first_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_seenby",
"target_field": "kismet.seenby"
}
},
{
"foreach": {
"field": "kismet.seenby",
"processor": {
"pipeline": {
"name": "kismet.seenby"
}
}
}
},
{
"rename": {
"field": "message2.kismet_device_base_manuf",
"target_field": "device.manufacturer"
}
},
{
"pipeline": {
"name": "{{event.dataset}}"
}
},
{
"remove": {
"field": [
"message2",
"message",
"device_type",
"wifi",
"agent",
"host",
"event.created"
],
"ignore_failure": true
}
}
]
}
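The date processors above turn Kismet's epoch-second fields (kismet_device_base_mod_time, *_first_time, *_last_time) into proper timestamps; the conversion is plain epoch seconds to UTC. For example, with an arbitrary sample value:

# 1705512000 is an arbitrary example value for kismet_device_base_last_time (GNU date assumed).
date -u -d @1705512000 +"%Y-%m-%dT%H:%M:%SZ"   # -> 2024-01-17T17:20:00Z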

View File

@@ -1,9 +0,0 @@
{
"processors": [
{
"pipeline": {
"name": "kismet.client"
}
}
]
}

View File

@@ -1,52 +0,0 @@
{
"processors": [
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_num_packets",
"target_field": "_ingest._value.packets_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_uuid",
"target_field": "_ingest._value.serial_number",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_first_time",
"target_field": "_ingest._value.first_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_last_time",
"target_field": "_ingest._value.last_seen",
"ignore_missing": true
}
},
{
"date": {
"field": "_ingest._value.first_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.first_seen",
"ignore_failure": true
}
},
{
"date": {
"field": "_ingest._value.last_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.last_seen",
"ignore_failure": true
}
}
]
}

View File

@@ -1,10 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
}
]
}

View File

@@ -1,22 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_commonname",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}

View File

@@ -1,389 +0,0 @@
{
"description": "Pipeline for pfSense",
"processors": [
{
"set": {
"field": "ecs.version",
"value": "8.10.0"
}
},
{
"set": {
"field": "observer.vendor",
"value": "netgate"
}
},
{
"set": {
"field": "observer.type",
"value": "firewall"
}
},
{
"rename": {
"field": "message",
"target_field": "event.original"
}
},
{
"set": {
"field": "event.kind",
"value": "event"
}
},
{
"set": {
"field": "event.timezone",
"value": "{{_tmp.tz_offset}}",
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
}
},
{
"grok": {
"description": "Parse syslog header",
"field": "event.original",
"patterns": [
"^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
],
"pattern_definitions": {
"ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
"TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
"BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
"BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
"NAME": "[[[:alnum:]]_-]+",
"SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
"TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
"OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
"PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
"BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
"META": "\\[[^\\]]*\\]"
}
}
},
{
"date": {
"if": "ctx._tmp.timestamp8601 != null",
"field": "_tmp.timestamp8601",
"target_field": "@timestamp",
"formats": [
"ISO8601"
]
}
},
{
"date": {
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
"field": "_tmp.timestamp",
"target_field": "@timestamp",
"formats": [
"MMM d HH:mm:ss",
"MMM d HH:mm:ss",
"MMM dd HH:mm:ss"
],
"timezone": "{{ event.timezone }}"
}
},
{
"grok": {
"description": "Set Event Provider",
"field": "process.name",
"patterns": [
"^%{HYPHENATED_WORDS:event.provider}"
],
"pattern_definitions": {
"HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
}
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-firewall",
"if": "ctx.event.provider == 'filterlog'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-openvpn",
"if": "ctx.event.provider == 'openvpn'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-ipsec",
"if": "ctx.event.provider == 'charon'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-dhcp",
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-unbound",
"if": "ctx.event.provider == 'unbound'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-haproxy",
"if": "ctx.event.provider == 'haproxy'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-php-fpm",
"if": "ctx.event.provider == 'php-fpm'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-squid",
"if": "ctx.event.provider == 'squid'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-suricata",
"if": "ctx.event.provider == 'suricata'"
}
},
{
"drop": {
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
}
},
{
"append": {
"field": "event.category",
"value": "network",
"if": "ctx.network != null"
}
},
{
"convert": {
"field": "source.address",
"target_field": "source.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"convert": {
"field": "destination.address",
"target_field": "destination.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"set": {
"field": "network.type",
"value": "ipv6",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
}
},
{
"set": {
"field": "network.type",
"value": "ipv4",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
}
},
{
"geoip": {
"field": "source.ip",
"target_field": "source.geo",
"ignore_missing": true
}
},
{
"geoip": {
"field": "destination.ip",
"target_field": "destination.geo",
"ignore_missing": true
}
},
{
"geoip": {
"ignore_missing": true,
"database_file": "GeoLite2-ASN.mmdb",
"field": "source.ip",
"target_field": "source.as",
"properties": [
"asn",
"organization_name"
]
}
},
{
"geoip": {
"database_file": "GeoLite2-ASN.mmdb",
"field": "destination.ip",
"target_field": "destination.as",
"properties": [
"asn",
"organization_name"
],
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.asn",
"target_field": "source.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.organization_name",
"target_field": "source.as.organization.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.asn",
"target_field": "destination.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.organization_name",
"target_field": "destination.as.organization.name",
"ignore_missing": true
}
},
{
"community_id": {
"target_field": "network.community_id",
"ignore_failure": true
}
},
{
"grok": {
"field": "observer.ingress.interface.name",
"patterns": [
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"set": {
"field": "network.vlan.id",
"copy_from": "observer.ingress.vlan.id",
"ignore_empty_value": true
}
},
{
"append": {
"field": "related.ip",
"value": "{{destination.ip}}",
"allow_duplicates": false,
"if": "ctx.destination?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.nat.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.nat?.ip != null"
}
},
{
"append": {
"field": "related.hosts",
"value": "{{destination.domain}}",
"if": "ctx.destination?.domain != null"
}
},
{
"append": {
"field": "related.user",
"value": "{{user.name}}",
"if": "ctx.user?.name != null"
}
},
{
"set": {
"field": "network.direction",
"value": "{{network.direction}}bound",
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
}
},
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"script": {
"lang": "painless",
"description": "This script processor iterates over the whole document to remove fields with null values.",
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
}
},
{
"remove": {
"field": "event.original",
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "logs-pfsense.log@custom",
"ignore_missing_pipeline": true
}
}
],
"on_failure": [
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}
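Once the syslog header is parsed, the pipeline above routes on event.provider to the per-service pfSense pipelines and drops anything it does not recognize. The routing reduces to roughly the following shell sketch over the same provider names:

# Rough equivalent of the provider-based routing in the pfSense pipeline above.
route_provider() {
  case "$1" in
    filterlog)              echo "logs-pfsense.log-1.16.0-firewall" ;;
    openvpn)                echo "logs-pfsense.log-1.16.0-openvpn" ;;
    charon)                 echo "logs-pfsense.log-1.16.0-ipsec" ;;
    dhcpd|dhclient|dhcp6c)  echo "logs-pfsense.log-1.16.0-dhcp" ;;
    unbound)                echo "logs-pfsense.log-1.16.0-unbound" ;;
    haproxy)                echo "logs-pfsense.log-1.16.0-haproxy" ;;
    php-fpm)                echo "logs-pfsense.log-1.16.0-php-fpm" ;;
    squid)                  echo "logs-pfsense.log-1.16.0-squid" ;;
    suricata)               echo "logs-pfsense.log-1.16.0-suricata" ;;
    *)                      echo "drop" ;;
  esac
}
route_provider filterlog   # -> logs-pfsense.log-1.16.0-firewall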

View File

@@ -1,31 +0,0 @@
{
"description": "Pipeline for parsing pfSense Suricata logs.",
"processors": [
{
"pipeline": {
"name": "suricata.common"
}
}
],
"on_failure": [
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}

View File

@@ -56,7 +56,6 @@
{ "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }},
{ "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }},
{ "set": { "if": "ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }},
{ "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }},
{ "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }},
{ "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } },
{ "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}},
@@ -68,8 +67,7 @@
{ "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }},
{ "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
{ "convert" : { "field" : "scan.pe.flags","type": "string", "ignore_missing":true }},
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -4,7 +4,6 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
{ "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },

View File

@@ -1,21 +0,0 @@
{
"description" : "suricata.ike",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_auth", "target_field": "ike.algorithm.authentication", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_enc", "target_field": "ike.algorithm.encryption", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_esn", "target_field": "ike.algorithm.esn", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_dh", "target_field": "ike.algorithm.dh", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_prf", "target_field": "ike.algorithm.prf", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.exchange_type", "target_field": "ike.exchange_type", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.payload", "target_field": "ike.payload", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.role", "target_field": "ike.role", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.init_spi", "target_field": "ike.spi.initiator", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.resp_spi", "target_field": "ike.spi.responder", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_major", "target_field": "ike.version.major", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_minor", "target_field": "ike.version.minor", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.ikev2.errors", "target_field": "ike.ikev2.errors", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -0,0 +1,8 @@
{
"description" : "suricata.ikev2",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -27,8 +27,7 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata",
"write"
"view_index_metadata"
]
}
],

View File

@@ -13,8 +13,7 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata",
"write"
"view_index_metadata"
]
}
],

View File

@@ -5,10 +5,6 @@ elasticsearch:
esheap:
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
helpLink: elasticsearch.html
index_clean:
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
forcedType: bool
helpLink: elasticsearch.html
retention:
retention_pct:
description: Total percentage of space used by Elasticsearch for multi-node clusters
@@ -49,28 +45,6 @@ elasticsearch:
description: Max number of boolean clauses per query.
global: True
helpLink: elasticsearch.html
pipelines:
custom001: &pipelines
description:
description: Description of the ingest node pipeline
global: True
advanced: True
helpLink: elasticsearch.html
processors:
description: Processors for the ingest node pipeline
global: True
advanced: True
multiline: True
helpLink: elasticsearch.html
custom002: *pipelines
custom003: *pipelines
custom004: *pipelines
custom005: *pipelines
custom006: *pipelines
custom007: *pipelines
custom008: *pipelines
custom009: *pipelines
custom010: *pipelines
index_settings:
global_overrides:
index_template:
@@ -99,9 +73,12 @@ elasticsearch:
description: The order to sort by. Must set index_sorting to True.
global: True
helpLink: elasticsearch.html
policy:
phases:
hot:
max_age:
description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
@@ -120,9 +97,7 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
helpLink: elasticsearch.html
actions:
@@ -133,8 +108,8 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
@@ -147,8 +122,6 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
@@ -275,9 +248,7 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
description: Minimum age of index. This determines when the index should be moved to the warm tier.
global: True
advanced: True
helpLink: elasticsearch.html
@@ -302,9 +273,7 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
advanced: True
helpLink: elasticsearch.html
@@ -319,8 +288,6 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -351,7 +318,6 @@ elasticsearch:
so-logs-windows_x_powershell: *indexSettings
so-logs-windows_x_powershell_operational: *indexSettings
so-logs-windows_x_sysmon_operational: *indexSettings
so-logs-winlog_x_winlog: *indexSettings
so-logs-apache_x_access: *indexSettings
so-logs-apache_x_error: *indexSettings
so-logs-auditd_x_log: *indexSettings
@@ -376,17 +342,10 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings
so-logs-cisco_ise_x_log: *indexSettings
so-logs-citrix_adc_x_interface: *indexSettings
so-logs-citrix_adc_x_lbvserver: *indexSettings
so-logs-citrix_adc_x_service: *indexSettings
so-logs-citrix_adc_x_system: *indexSettings
so-logs-citrix_adc_x_vpn: *indexSettings
so-logs-citrix_waf_x_log: *indexSettings
so-logs-cloudflare_x_audit: *indexSettings
so-logs-cloudflare_x_logpull: *indexSettings
so-logs-crowdstrike_x_falcon: *indexSettings
@@ -394,7 +353,6 @@ elasticsearch:
so-logs-darktrace_x_ai_analyst_alert: *indexSettings
so-logs-darktrace_x_model_breach_alert: *indexSettings
so-logs-darktrace_x_system_status_alert: *indexSettings
so-logs-detections_x_alerts: *indexSettings
so-logs-f5_bigip_x_log: *indexSettings
so-logs-fim_x_event: *indexSettings
so-logs-fortinet_x_clientendpoint: *indexSettings
@@ -448,8 +406,6 @@ elasticsearch:
so-logs-mysql_x_error: *indexSettings
so-logs-mysql_x_slowlog: *indexSettings
so-logs-netflow_x_log: *indexSettings
so-logs-nginx_x_access: *indexSettings
so-logs-nginx_x_error: *indexSettings
so-logs-o365_x_audit: *indexSettings
so-logs-okta_x_system: *indexSettings
so-logs-panw_x_panos: *indexSettings
@@ -515,7 +471,6 @@ elasticsearch:
so-metrics-endpoint_x_metadata: *indexSettings
so-metrics-endpoint_x_metrics: *indexSettings
so-metrics-endpoint_x_policy: *indexSettings
so-metrics-nginx_x_stubstatus: *indexSettings
so-case: *indexSettings
so-common: *indexSettings
so-endgame: *indexSettings
@@ -523,7 +478,6 @@ elasticsearch:
so-suricata: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
so-logstash: *indexSettings
so-redis: *indexSettings
so-strelka: *indexSettings
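Several of the min_age settings above are validated with the regex ^[0-9]{1,5}d$, i.e. one to five digits followed by a literal d. A quick way to check a candidate value against that pattern before putting it in the pillar:

# Values such as 30d or 365d match; "30 days", 30h, and d30 do not.
for v in 30d 365d "30 days" 30h d30; do
  if echo "$v" | grep -Eq '^[0-9]{1,5}d$'; then
    echo "$v: valid"
  else
    echo "$v: invalid"
  fi
done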

View File

@@ -2,9 +2,11 @@
{% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}
{% set PILLAR_GLOBAL_OVERRIDES = {} %}
{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings', {}) %}
{% if ES_INDEX_PILLAR.global_overrides is defined %}
{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %}
{% if salt['pillar.get']('elasticsearch:index_settings') is defined %}
{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings') %}
{% if ES_INDEX_PILLAR.global_overrides is defined %}
{% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %}
{% endif %}
{% endif %}
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
@@ -17,12 +19,6 @@
{% set ES_INDEX_SETTINGS = {} %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
{# if policy isn't defined in the original index settings, then don't merge policy from the global_overrides #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %}
{% if settings.index_template is defined %}
{% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %}
{% do settings.index_template.template.settings.index.pop('sort') %}
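The Jinja above merges the default index settings with any elasticsearch:index_settings pillar overrides, popping global_overrides out before the per-index merge. To see what a node actually feeds into that merge, the pillar can be dumped directly; this assumes it is run on a minion with the elasticsearch pillar applied.

# List the top-level keys of the index_settings pillar consumed by the merge logic above.
salt-call pillar.get elasticsearch:index_settings --out=json | jq -r '.local | keys[]'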

View File

@@ -1,36 +0,0 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"device": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"manufacturer": {
"ignore_above": 1024,
"type": "keyword"
},
"model": {
"properties": {
"identifier": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
}
}
}
}

View File

@@ -1,32 +0,0 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"kismet": {
"properties": {
"alerts": {
"properties": {
"count": {
"type": "long"
}
}
},
"first_seen": {
"type": "date"
},
"last_seen": {
"type": "date"
},
"seenby": {
"type": "nested"
}
}
}
}
}
}
}

View File

@@ -77,43 +77,6 @@
"type": "keyword"
}
}
},
"wireless": {
"properties": {
"associated_clients": {
"ignore_above": 1024,
"type": "keyword"
},
"bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"channel": {
"ignore_above": 1024,
"type": "keyword"
},
"channel_utilization": {
"type": "float"
},
"frequency": {
"type": "double"
},
"ssid": {
"ignore_above": 1024,
"type": "keyword"
},
"ssid_cloaked": {
"type": "integer"
},
"known_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"last_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}

View File

@@ -1,383 +1,382 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": {
"total_fields": {
"limit": "10000"
}
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
}
}
},
"mappings": {
"dynamic": false,
"dynamic_templates": [
{
"container.labels": {
"path_match": "container.labels.*",
"mapping": {
"type": "keyword"
},
"match_mapping_type": "string"
}
}
],
"properties": {
"container": {
"properties": {
"image": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"agent": {
"properties": {
"build": {
"properties": {
"original": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"log": {
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"elastic_agent": {
"properties": {
"process": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"snapshot": {
"type": "boolean"
}
}
},
"message": {
"type": "text"
},
"cloud": {
"properties": {
"availability_zone": {
"ignore_above": 1024,
"type": "keyword"
},
"image": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"instance": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"machine": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"project": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"region": {
"ignore_above": 1024,
"type": "keyword"
},
"account": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
{"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": {
"total_fields": {
"limit": "10000"
}
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
}
}
}
},
"component": {
"properties": {
"binary": {
"ignore_above": 1024,
"type": "keyword"
},
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"unit": {
"properties": {
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"host": {
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"os": {
"properties": {
"build": {
"ignore_above": 1024,
"type": "keyword"
},
"kernel": {
"ignore_above": 1024,
"type": "keyword"
},
"codename": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"text": {
"type": "text"
},
"mappings": {
"dynamic": false,
"dynamic_templates": [
{
"container.labels": {
"path_match": "container.labels.*",
"mapping": {
"type": "keyword"
},
"match_mapping_type": "string"
}
}
],
"properties": {
"container": {
"properties": {
"image": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"agent": {
"properties": {
"build": {
"properties": {
"original": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"log": {
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"elastic_agent": {
"properties": {
"process": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"snapshot": {
"type": "boolean"
}
}
},
"message": {
"type": "text"
},
"cloud": {
"properties": {
"availability_zone": {
"ignore_above": 1024,
"type": "keyword"
},
"image": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"instance": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"machine": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"project": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"region": {
"ignore_above": 1024,
"type": "keyword"
},
"account": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
},
"family": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"platform": {
"ignore_above": 1024,
"type": "keyword"
}
},
"component": {
"properties": {
"binary": {
"ignore_above": 1024,
"type": "keyword"
},
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"unit": {
"properties": {
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"host": {
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"os": {
"properties": {
"build": {
"ignore_above": 1024,
"type": "keyword"
},
"kernel": {
"ignore_above": 1024,
"type": "keyword"
},
"codename": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"text": {
"type": "text"
}
}
},
"family": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"platform": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"domain": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "ip"
},
"containerized": {
"type": "boolean"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"mac": {
"ignore_above": 1024,
"type": "keyword"
},
"architecture": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event": {
"properties": {
"dataset": {
"type": "constant_keyword"
}
}
}
},
"domain": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "ip"
},
"containerized": {
"type": "boolean"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"mac": {
"ignore_above": 1024,
"type": "keyword"
},
"architecture": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event": {
"properties": {
"dataset": {
"type": "constant_keyword"
}
}
"_meta": {
"package": {
"name": "elastic_agent"
},
"managed_by": "fleet",
"managed": true
}
}
}
},
"_meta": {
"package": {
"name": "elastic_agent"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,12 +0,0 @@
{
"template": {
"settings": {}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,132 +0,0 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs-endpoint.collection-diagnostic"
},
"codec": "best_compression",
"default_pipeline": "logs-endpoint.diagnostic.collection-8.10.2",
"mapping": {
"total_fields": {
"limit": "10000"
},
"ignore_malformed": "true"
},
"query": {
"default_field": [
"ecs.version",
"event.action",
"event.category",
"event.code",
"event.dataset",
"event.hash",
"event.id",
"event.kind",
"event.module",
"event.outcome",
"event.provider",
"event.type"
]
}
}
},
"mappings": {
"dynamic": false,
"properties": {
"@timestamp": {
"ignore_malformed": false,
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"event": {
"properties": {
"severity": {
"type": "long"
},
"code": {
"ignore_above": 1024,
"type": "keyword"
},
"created": {
"type": "date"
},
"kind": {
"ignore_above": 1024,
"type": "keyword"
},
"module": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"sequence": {
"type": "long"
},
"ingested": {
"type": "date"
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"action": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"dataset": {
"ignore_above": 1024,
"type": "keyword"
},
"hash": {
"ignore_above": 1024,
"type": "keyword"
},
"outcome": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,22 +0,0 @@
{
"template": {
"mappings": {
"properties": {
"error": {
"properties": {
"message": {
"type": "match_only_text"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "system"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,154 +0,0 @@
{
"template": {
"mappings": {
"properties": {
"so_audit_doc_id": {
"ignore_above": 1024,
"type": "keyword"
},
"@timestamp": {
"type": "date"
},
"so_kind": {
"ignore_above": 1024,
"type": "keyword"
},
"so_operation": {
"ignore_above": 1024,
"type": "keyword"
},
"so_detection": {
"properties": {
"publicId": {
"ignore_above": 1024,
"type": "keyword"
},
"title": {
"ignore_above": 1024,
"type": "keyword"
},
"severity": {
"ignore_above": 1024,
"type": "keyword"
},
"author": {
"ignore_above": 1024,
"type": "keyword"
},
"description": {
"type": "text"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"product": {
"ignore_above": 1024,
"type": "keyword"
},
"service": {
"ignore_above": 1024,
"type": "keyword"
},
"content": {
"type": "text"
},
"isEnabled": {
"type": "boolean"
},
"isReporting": {
"type": "boolean"
},
"isCommunity": {
"type": "boolean"
},
"tags": {
"ignore_above": 1024,
"type": "keyword"
},
"ruleset": {
"ignore_above": 1024,
"type": "keyword"
},
"engine": {
"ignore_above": 1024,
"type": "keyword"
},
"language": {
"ignore_above": 1024,
"type": "keyword"
},
"license": {
"ignore_above": 1024,
"type": "keyword"
},
"overrides": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"isEnabled": {
"type": "boolean"
},
"createdAt": {
"type": "date"
},
"updatedAt": {
"type": "date"
},
"regex": {
"type": "text"
},
"value": {
"type": "text"
},
"thresholdType": {
"ignore_above": 1024,
"type": "keyword"
},
"track": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "text"
},
"count": {
"type": "long"
},
"seconds": {
"type": "long"
},
"customFilter": {
"type": "text"
}
}
}
}
},
"so_detectioncomment": {
"properties": {
"createTime": {
"type": "date"
},
"detectionId": {
"ignore_above": 1024,
"type": "keyword"
},
"value": {
"type": "text"
},
"userId": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"ecs_version": "1.12.2"
}
}

View File

@@ -1,7 +0,0 @@
{
"template": {},
"version": 1,
"_meta": {
"description": "default settings for common Security Onion Detections indices"
}
}

View File

@@ -14,19 +14,16 @@
},
"pe": {
"properties": {
"flags": {
"type": "text"
},
"image_version": {
"type": "float"
},
"sections": {
"sections": {
"properties": {
"entropy": {
"type": "float"
}
}
}
},
"image_version": {
"type": "float"
}
}
},
"elf": {

View File

@@ -5,6 +5,6 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://localhost:9200/_cat/indices?pretty&v&s=index"
curl -K /opt/so/conf/elasticsearch/curl.config -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"

View File

@@ -40,7 +40,7 @@ fi
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do
{% else %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}

View File

@@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do
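Both retention scripts above pull fixed columns out of _cat/allocation with awk, so the column order matters. Printing the endpoint with its header row makes it easy to confirm which positional field ($2, $3, $5) corresponds to disk.indices, disk.used, and so on:

# Show _cat/allocation with headers to confirm the column positions
# that the awk '{print $N}' calls above depend on.
/usr/sbin/so-elasticsearch-query "_cat/allocation?v"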

View File

@@ -27,7 +27,6 @@ overlimit() {
# 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.
while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
# If we can't query Elasticsearch, then immediately return false.
@@ -35,36 +34,28 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
# We iterate through the closed and open indices
CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Check if index is an older index. If it is an older index, delete it before moving on to newer indices.
if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
else
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1
for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1
fi
if ! overlimit ; then
exit
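A condensed sketch of the write-index check above (the data stream name is a placeholder matching the logs-<module>-<namespace> pattern): the last backing index reported by the _data_stream API is the current write index and must not be deleted on its own.

DATASTREAM="logs-soc-so"   # hypothetical data stream name for illustration
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices[-1].index_name')
echo "${DATASTREAM}: ${BACKING_INDICES} backing indices, write index: ${CURRENT_WRITE_INDEX}"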

View File

@@ -133,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
for i in $pattern; do
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery)
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"

View File

@@ -9,9 +9,11 @@
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
@@ -30,9 +32,11 @@
'so-kibana',
'so-kratos',
'so-logstash',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
@@ -91,7 +95,6 @@
{% set NODE_CONTAINERS = [
'so-elastic-fleet',
'so-logstash',
'so-nginx-fleet-node'
] %}
{% elif GLOBALS.role == 'so-sensor' %}

View File

@@ -98,11 +98,19 @@ firewall:
tcp:
- 7788
udp: []
mysql:
tcp:
- 3306
udp: []
nginx:
tcp:
- 80
- 443
udp: []
playbook:
tcp:
- 3000
udp: []
redis:
tcp:
- 6379
@@ -170,6 +178,8 @@ firewall:
hostgroups:
eval:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -353,6 +363,8 @@ firewall:
hostgroups:
manager:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -547,6 +559,8 @@ firewall:
hostgroups:
managersearch:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -742,6 +756,8 @@ firewall:
- all
standalone:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -1279,10 +1295,6 @@ firewall:
portgroups:
- redis
- beats_5644
managersearch:
portgroups:
- redis
- beats_5644
self:
portgroups:
- redis

View File

@@ -7,7 +7,6 @@ firewall:
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
anywhere: &hostgroupsettingsadv
description: List of IP or CIDR blocks to allow access to this hostgroup.
forcedType: "[]string"
@@ -16,7 +15,6 @@ firewall:
advanced: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
beats_endpoint: *hostgroupsettings
beats_endpoint_ssl: *hostgroupsettings
dockernet: &ROhostgroupsettingsadv
@@ -55,7 +53,6 @@ firewall:
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
duplicates: True
customhostgroup1: *customhostgroupsettings
customhostgroup2: *customhostgroupsettings
customhostgroup3: *customhostgroupsettings
@@ -73,14 +70,12 @@ firewall:
helpLink: firewall.html
advanced: True
multiline: True
duplicates: True
udp: &udpsettings
description: List of UDP ports for this port group.
forcedType: "[]string"
helpLink: firewall.html
advanced: True
multiline: True
duplicates: True
agrules:
tcp: *tcpsettings
udp: *udpsettings
@@ -126,9 +121,15 @@ firewall:
localrules:
tcp: *tcpsettings
udp: *udpsettings
mysql:
tcp: *tcpsettings
udp: *udpsettings
nginx:
tcp: *tcpsettings
udp: *udpsettings
playbook:
tcp: *tcpsettings
udp: *udpsettings
redis:
tcp: *tcpsettings
udp: *udpsettings
@@ -192,7 +193,6 @@ firewall:
multiline: True
forcedType: "[]string"
helpLink: firewall.html
duplicates: True
sensor:
portgroups: *portgroupsdocker
searchnode:
@@ -246,7 +246,6 @@ firewall:
multiline: True
forcedType: "[]string"
helpLink: firewall.html
duplicates: True
dockernet:
portgroups: *portgroupshost
localhost:
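The IP/CIDR regex used by these annotations can be exercised directly with grep -E. Note that it constrains the CIDR prefix to 0-32 but does not restrict octets to 0-255; the sample values below are illustrative.

CIDR_RE='^(([0-9]{1,3}\.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?)?$'
for candidate in "10.0.0.0/24" "192.168.1.300" "not-an-ip"; do
  if echo "$candidate" | grep -Eq "$CIDR_RE"; then
    echo "$candidate: accepted"
  else
    echo "$candidate: rejected"
  fi
done
# 10.0.0.0/24: accepted; 192.168.1.300: accepted (octets are not range-checked); not-an-ip: rejected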

View File

@@ -1,2 +0,0 @@
global:
pcapengine: STENO

View File

@@ -1,2 +0,0 @@
{% import_yaml 'global/defaults.yaml' as GLOBALDEFAULTS %}
{% set GLOBALMERGED = salt['pillar.get']('global', GLOBALDEFAULTS.global, merge=True) %}

View File

@@ -10,15 +10,10 @@ global:
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
mdengine:
description: Which engine to use for meta data generation. Options are ZEEK and SURICATA.
description: What engine to use for meta data generation. Options are ZEEK and SURICATA.
regex: ^(ZEEK|SURICATA)$
regexFailureMessage: You must enter either ZEEK or SURICATA.
global: True
pcapengine:
description: Which engine to use for generating pcap. Options are STENO, SURICATA or TRANSITION.
regex: ^(STENO|SURICATA|TRANSITION)$
regexFailureMessage: You must enter either STENO, SURICATA or TRANSITION.
global: True
ids:
description: Which IDS engine to use. Currently only Suricata is supported.
global: True
@@ -28,7 +23,7 @@ global:
description: Used for handling of authentication cookies.
global: True
airgap:
description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
description: Sets airgap mode.
global: True
readonly: True
imagerepo:

Some files were not shown because too many files have changed in this diff.