Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 17:22:49 +01:00

Compare commits (1 commit): 2.4.80-202 ... sysusers

Commit 50ab63162a

3  .github/.gitleaks.toml (vendored)
@@ -536,10 +536,11 @@ secretGroup = 4

[allowlist]
description = "global allow lists"
-regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''']
+regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''']
paths = [
'''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
'''(go.mod|go.sum)$''',
+
'''salt/nginx/files/enterprise-attack.json'''
]
190  .github/DISCUSSION_TEMPLATE/2-4.yml (vendored)

@@ -1,190 +0,0 @@ (entire file removed)
body:
  - type: markdown
    attributes:
      value: |
        ⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️

        If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
  - type: dropdown
    attributes:
      label: Version
      description: Which version of Security Onion 2.4.x are you asking about?
      options:
        -
        - 2.4 Pre-release (Beta, Release Candidate)
        - 2.4.10
        - 2.4.20
        - 2.4.30
        - 2.4.40
        - 2.4.50
        - 2.4.60
        - 2.4.70
        - 2.4.80
        - 2.4.90
        - 2.4.100
        - Other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Installation Method
      description: How did you install Security Onion?
      options:
        -
        - Security Onion ISO image
        - Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc.
        - Network installation on Ubuntu
        - Network installation on Debian
        - Other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Description
      description: >
        Is this discussion about installation, configuration, upgrading, or other?
      options:
        -
        - installation
        - configuration
        - upgrading
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Installation Type
      description: >
        When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
      options:
        -
        - Import
        - Eval
        - Standalone
        - Distributed
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Location
      description: >
        Is this deployment in the cloud, on-prem with Internet access, or airgap?
      options:
        -
        - cloud
        - on-prem with Internet access
        - airgap
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Hardware Specs
      description: >
        Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html?
      options:
        -
        - Meets minimum requirements
        - Exceeds minimum requirements
        - Does not meet minimum requirements
        - other (please provide detail below)
    validations:
      required: true
  - type: input
    attributes:
      label: CPU
      description: How many CPU cores do you have?
    validations:
      required: true
  - type: input
    attributes:
      label: RAM
      description: How much RAM do you have?
    validations:
      required: true
  - type: input
    attributes:
      label: Storage for /
      description: How much storage do you have for the / partition?
    validations:
      required: true
  - type: input
    attributes:
      label: Storage for /nsm
      description: How much storage do you have for the /nsm partition?
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Network Traffic Collection
      description: >
        Are you collecting network traffic from a tap or span port?
      options:
        -
        - tap
        - span port
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Network Traffic Speeds
      description: >
        How much network traffic are you monitoring?
      options:
        -
        - Less than 1Gbps
        - 1Gbps to 10Gbps
        - more than 10Gbps
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Status
      description: >
        Does SOC Grid show all services on all nodes as running OK?
      options:
        -
        - Yes, all services on all nodes are running OK
        - No, one or more services are failed (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Salt Status
      description: >
        Do you get any failures when you run "sudo salt-call state.highstate"?
      options:
        -
        - Yes, there are salt failures (please provide detail below)
        - No, there are no failures
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Logs
      description: >
        Are there any additional clues in /opt/so/log/?
      options:
        -
        - Yes, there are additional clues in /opt/so/log/ (please provide detail below)
        - No, there are no additional clues
    validations:
      required: true
  - type: textarea
    attributes:
      label: Detail
      description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
      placeholder: |-
        STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!

        If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Guidelines
      options:
        - label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
          required: true
33  .github/workflows/close-threads.yml (vendored)

@@ -1,33 +0,0 @@ (entire file removed)
name: 'Close Threads'

on:
  schedule:
    - cron: '50 1 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write
  discussions: write

concurrency:
  group: lock-threads

jobs:
  close-threads:
    if: github.repository_owner == 'security-onion-solutions'
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v5
        with:
          days-before-issue-stale: -1
          days-before-issue-close: 60
          stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
          close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
          days-before-pr-stale: 45
          days-before-pr-close: 60
          stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
          close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."
26  .github/workflows/lock-threads.yml (vendored)

@@ -1,26 +0,0 @@ (entire file removed)
name: 'Lock Threads'

on:
  schedule:
    - cron: '50 2 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write
  discussions: write

concurrency:
  group: lock-threads

jobs:
  lock-threads:
    if: github.repository_owner == 'security-onion-solutions'
    runs-on: ubuntu-latest
    steps:
      - uses: jertel/lock-threads@main
        with:
          include-discussion-currently-open: true
          discussion-inactive-days: 90
          issue-inactive-days: 30
          pr-inactive-days: 30
@@ -1,17 +1,17 @@
-### 2.4.80-20240624 ISO image released on 2024/06/25
+### 2.4.30-20231228 ISO image released on 2024/01/02


### Download and Verify

-2.4.80-20240624 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
+2.4.30-20231228 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso

-MD5: 139F9762E926F9CB3C4A9528A3752C31
-SHA1: BC6CA2C5F4ABC1A04E83A5CF8FFA6A53B1583CC9
-SHA256: 70E90845C84FFA30AD6CF21504634F57C273E7996CA72F7250428DDBAAC5B1BD
+MD5: DBD47645CD6FA8358C51D8753046FB54
+SHA1: 2494091065434ACB028F71444A5D16E8F8A11EDF
+SHA256: 3345AE1DC58AC7F29D82E60D9A36CDF8DE19B7DFF999D8C4F89C7BD36AEE7F1D

Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS

@@ -25,29 +25,27 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

Download the signature file for the ISO:
```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
```

Download the ISO image:
```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
```

Verify the downloaded ISO image using the signature file:
```
-gpg --verify securityonion-2.4.80-20240624.iso.sig securityonion-2.4.80-20240624.iso
+gpg --verify securityonion-2.4.30-20231228.iso.sig securityonion-2.4.30-20231228.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
-gpg: Signature made Mon 24 Jun 2024 02:42:03 PM EDT using RSA key ID FE507013
+gpg: Signature made Thu 28 Dec 2023 10:08:31 AM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```

-If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
-
Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.4/installation.html
13  README.md

@@ -8,22 +8,19 @@ Alerts
![](https://docs.securityonion.net/en/2.4/_images/soc-alerts.png)

Dashboards
![](https://docs.securityonion.net/en/2.4/_images/soc-dashboards.png)

Hunt
![](https://docs.securityonion.net/en/2.4/_images/soc-hunt.png)

-Detections
-![](https://docs.securityonion.net/en/2.4/_images/soc-detections.png)
-
PCAP
![](https://docs.securityonion.net/en/2.4/_images/soc-pcap.png)

Grid
![](https://docs.securityonion.net/en/2.4/_images/soc-grid.png)

Config
![](https://docs.securityonion.net/en/2.4/_images/soc-config.png)

### Release Notes
@@ -41,8 +41,7 @@ file_roots:
base:
  - /opt/so/saltstack/local/salt
  - /opt/so/saltstack/default/salt
-  - /nsm/elastic-fleet/artifacts
-  - /opt/so/rules/nids

# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
@@ -1,2 +0,0 @@ (entire file removed)
-kafka:
-  nodes:
@@ -16,6 +16,7 @@ base:
    - sensoroni.adv_sensoroni
    - telegraf.soc_telegraf
    - telegraf.adv_telegraf
+   - users

  '* and not *_desktop':
    - firewall.soc_firewall

@@ -43,6 +44,8 @@ base:
    - soc.soc_soc
    - soc.adv_soc
    - soc.license
+   - soctopus.soc_soctopus
+   - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - kratos.soc_kratos

@@ -59,12 +62,10 @@ base:
    - elastalert.adv_elastalert
    - backup.soc_backup
    - backup.adv_backup
+   - soctopus.soc_soctopus
+   - soctopus.adv_soctopus
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
-   - kafka.nodes
-   - kafka.soc_kafka
-   - kafka.adv_kafka
-   - stig.soc_stig

  '*_sensor':
    - healthcheck.sensor

@@ -80,8 +81,6 @@ base:
    - suricata.adv_suricata
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
-   - stig.soc_stig
-   - soc.license

  '*_eval':
    - secrets

@@ -107,6 +106,8 @@ base:
    - soc.soc_soc
    - soc.adv_soc
    - soc.license
+   - soctopus.soc_soctopus
+   - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - strelka.soc_strelka

@@ -162,6 +163,8 @@ base:
    - soc.soc_soc
    - soc.adv_soc
    - soc.license
+   - soctopus.soc_soctopus
+   - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - strelka.soc_strelka

@@ -178,10 +181,6 @@ base:
    - suricata.adv_suricata
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
-   - stig.soc_stig
-   - kafka.nodes
-   - kafka.soc_kafka
-   - kafka.adv_kafka

  '*_heavynode':
    - elasticsearch.auth

@@ -224,9 +223,6 @@ base:
    - redis.adv_redis
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
-   - stig.soc_stig
-   - soc.license
-   - kafka.nodes

  '*_receiver':
    - logstash.nodes

@@ -239,10 +235,6 @@ base:
    - redis.adv_redis
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
-   - kafka.nodes
-   - kafka.soc_kafka
-   - kafka.adv_kafka
-   - soc.license

  '*_import':
    - secrets

@@ -265,6 +257,8 @@ base:
    - soc.soc_soc
    - soc.adv_soc
    - soc.license
+   - soctopus.soc_soctopus
+   - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - backup.soc_backup
2  pillar/users/init.sls (new file)

@@ -0,0 +1,2 @@ (new file)
# users pillar goes in /opt/so/saltstack/local/pillar/users/init.sls
# the users directory may need to be created under /opt/so/saltstack/local/pillar
18  pillar/users/pillar.example (new file)

@@ -0,0 +1,18 @@ (new file)
users:
  sclapton:
    # required fields
    status: present
    # node_access determines which node types the user can access.
    # this can either be by grains.role or by final part of the minion id after the _
    node_access:
      - standalone
      - searchnode
    # optional fields
    fullname: Stevie Claptoon
    uid: 1001
    gid: 1001
    homephone: does not have a phone
    groups:
      - mygroup1
      - mygroup2
      - wheel # give sudo access
20  pillar/users/pillar.usage (new file)

@@ -0,0 +1,20 @@ (new file)
users:
  sclapton:
    # required fields
    status: <present | absent>
    # node_access determines which node types the user can access.
    # this can either be by grains.role or by final part of the minion id after the _
    node_access:
      - standalone
      - searchnode
    # optional fields
    fullname: <string>
    uid: <integer>
    gid: <integer>
    roomnumber: <string>
    workphone: <string>
    homephone: <string>
    groups:
      - <string>
      - <string>
      - wheel # give sudo access
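The two files above only add pillar data; the state that consumes it is not part of this diff. As a rough sketch only (the file path salt/users/init.sls and the state IDs are hypothetical, and the node_access filtering against grains.role or the minion id suffix is deliberately omitted), a state driven by this pillar could look like:

# Hypothetical sketch, not part of this commit: create or remove each user
# defined in the users pillar shown in pillar/users/pillar.example above.
{% for username, user in salt['pillar.get']('users', {}).items() %}
user_{{ username }}:
{%   if user.get('status', 'present') == 'present' %}
  user.present:
    - name: {{ username }}
{%     if 'fullname' in user %}
    - fullname: {{ user.fullname }}
{%     endif %}
{%     if 'uid' in user %}
    - uid: {{ user.uid }}
{%     endif %}
{%     if 'gid' in user %}
    - gid: {{ user.gid }}
{%     endif %}
{%     if 'groups' in user %}
    - groups: {{ user.groups }}
{%     endif %}
{%   else %}
  user.absent:
    - name: {{ username }}
{%   endif %}
{% endfor %}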
12  pyci.sh

@@ -15,16 +15,12 @@ TARGET_DIR=${1:-.}

PATH=$PATH:/usr/local/bin

-if [ ! -d .venv ]; then
-  python -m venv .venv
-fi
-
-source .venv/bin/activate
-
-if ! pip install flake8 pytest pytest-cov pyyaml; then
-  echo "Unable to install dependencies."
+if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
+  echo "Missing dependencies. Consider running the following command:"
+  echo " python -m pip install flake8 pytest pytest-cov"
  exit 1
fi

+pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
@@ -34,6 +34,7 @@
        'suricata',
        'utility',
        'schedule',
+       'soctopus',
        'tcpreplay',
        'docker_clean'
    ],

@@ -65,7 +66,6 @@
        'registry',
        'manager',
        'nginx',
-       'strelka.manager',
        'soc',
        'kratos',
        'influxdb',

@@ -92,7 +92,6 @@
        'nginx',
        'telegraf',
        'influxdb',
-       'strelka.manager',
        'soc',
        'kratos',
        'elasticfleet',

@@ -102,9 +101,8 @@
        'suricata.manager',
        'utility',
        'schedule',
-       'docker_clean',
-       'stig',
-       'kafka'
+       'soctopus',
+       'docker_clean'
    ],
    'so-managersearch': [
        'salt.master',

@@ -114,7 +112,6 @@
        'nginx',
        'telegraf',
        'influxdb',
-       'strelka.manager',
        'soc',
        'kratos',
        'elastic-fleet-package-registry',

@@ -125,9 +122,8 @@
        'suricata.manager',
        'utility',
        'schedule',
-       'docker_clean',
-       'stig',
-       'kafka'
+       'soctopus',
+       'docker_clean'
    ],
    'so-searchnode': [
        'ssl',

@@ -135,8 +131,7 @@
        'telegraf',
        'firewall',
        'schedule',
-       'docker_clean',
-       'stig'
+       'docker_clean'
    ],
    'so-standalone': [
        'salt.master',

@@ -159,10 +154,9 @@
        'healthcheck',
        'utility',
        'schedule',
+       'soctopus',
        'tcpreplay',
-       'docker_clean',
-       'stig',
-       'kafka'
+       'docker_clean'
    ],
    'so-sensor': [
        'ssl',

@@ -174,15 +168,13 @@
        'healthcheck',
        'schedule',
        'tcpreplay',
-       'docker_clean',
-       'stig'
+       'docker_clean'
    ],
    'so-fleet': [
        'ssl',
        'telegraf',
        'firewall',
        'logstash',
-       'nginx',
        'healthcheck',
        'schedule',
        'elasticfleet',

@@ -193,10 +185,7 @@
        'telegraf',
        'firewall',
        'schedule',
-       'docker_clean',
-       'kafka',
-       'elasticsearch.ca',
-       'stig'
+       'docker_clean'
    ],
    'so-desktop': [
        'ssl',

@@ -205,6 +194,10 @@
    ],
  }, grain='role') %}

+{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
+{% do allowed_states.append('mysql') %}
+{% endif %}
+
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('zeek') %}
{%- endif %}

@@ -230,6 +223,10 @@
{% do allowed_states.append('elastalert') %}
{% endif %}

+{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
+{% do allowed_states.append('playbook') %}
+{% endif %}
+
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}
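For context only (this is an illustration, not part of the diff): the allowed_states list built by grains.filter_by above, and extended by the appends shown in these hunks, is what ultimately decides which states a given role is assigned. A minimal sketch of how such a list is typically expanded further down a Salt top file might be:

# Illustrative sketch only: render whatever survived the role-based filtering
# (for example the 'mysql' or 'playbook' entries appended in the hunks above)
# into the minion's assigned state list.
base:
  '{{ grains.id }}':
{% for state in allowed_states %}
    - {{ state }}
{% endfor %}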
@@ -1,10 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% if GLOBALS.pcap_engine == "TRANSITION" %}
-{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
-{% else %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
+
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
+
{% set PCAPBPF = BPFMERGED.pcap %}
-{% endif %}
@@ -1,6 +1,6 @@
bpf:
  pcap:
-    description: List of BPF filters to apply to Stenographer.
+    description: List of BPF filters to apply to PCAP.
    multiline: True
    forcedType: "[]string"
    helpLink: bpf.html
@@ -1,3 +1,6 @@
+mine_functions:
+  x509.get_pem_entries: [/etc/pki/ca.crt]
+
x509_signing_policies:
  filebeat:
    - minions: '*'

@@ -67,17 +70,3 @@ x509_signing_policies:
    - authorityKeyIdentifier: keyid,issuer:always
    - days_valid: 820
    - copypath: /etc/pki/issued_certs/
-  kafka:
-    - minions: '*'
-    - signing_private_key: /etc/pki/ca.key
-    - signing_cert: /etc/pki/ca.crt
-    - C: US
-    - ST: Utah
-    - L: Salt Lake City
-    - basicConstraints: "critical CA:false"
-    - keyUsage: "digitalSignature, keyEncipherment"
-    - subjectKeyIdentifier: hash
-    - authorityKeyIdentifier: keyid,issuer:always
-    - extendedKeyUsage: "serverAuth, clientAuth"
-    - days_valid: 820
-    - copypath: /etc/pki/issued_certs/
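The signing policies above are only half of the picture; a certificate is issued when a minion requests one against a policy. As a hedged sketch only (the state ID, file paths, and GLOBALS references are illustrative and not from this commit, and exact parameter names depend on the Salt x509 module version in use):

# Illustrative only: request a certificate from the CA (manager) under the
# "filebeat" policy shown in this pillar.
request_filebeat_cert:
  x509.certificate_managed:
    - name: /etc/pki/filebeat.crt
    - ca_server: {{ GLOBALS.manager }}   # minion id of the CA / manager node
    - signing_policy: filebeat           # must match a policy name in this pillar
    - public_key: /etc/pki/filebeat.key  # assumes an existing key pair
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820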
@@ -4,6 +4,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
+  - common.soup_scripts
  - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - manager.elasticsearch # needed for elastic_curl_config state

@@ -133,18 +134,6 @@ common_sbin_jinja:
    - file_mode: 755
    - template: jinja

-{% if not GLOBALS.is_manager%}
-# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
-# these two states remove the scripts from non manager nodes
-remove_soup:
-  file.absent:
-    - name: /usr/sbin/soup
-
-remove_so-firewall:
-  file.absent:
-    - name: /usr/sbin/so-firewall
-{% endif %}
-
so-status_script:
  file.managed:
    - name: /usr/sbin/so-status
@@ -1,117 +1,23 @@ (entire file rewritten)

Removed (old version of the file):
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}

{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}

remove_common_soup:
  file.absent:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup

remove_common_so-firewall:
  file.absent:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall

# This section is used to put the scripts in place in the Salt file system
# in case a state run tries to overwrite what we do in the next section.
copy_so-common_common_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
    - force: True
    - preserve: True

copy_so-image-common_common_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
    - force: True
    - preserve: True

copy_soup_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
    - force: True
    - preserve: True

copy_so-firewall_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
    - force: True
    - preserve: True

copy_so-yaml_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
    - force: True
    - preserve: True

copy_so-repo-sync_manager_tools_sbin:
  file.copy:
    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
    - preserve: True

# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin:
  file.copy:
    - name: /usr/sbin/so-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
    - force: True
    - preserve: True

copy_so-image-common_sbin:
  file.copy:
    - name: /usr/sbin/so-image-common
    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
    - force: True
    - preserve: True

copy_soup_sbin:
  file.copy:
    - name: /usr/sbin/soup
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
    - force: True
    - preserve: True

copy_so-firewall_sbin:
  file.copy:
    - name: /usr/sbin/so-firewall
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
    - force: True
    - preserve: True

copy_so-yaml_sbin:
  file.copy:
    - name: /usr/sbin/so-yaml.py
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
    - force: True
    - preserve: True

copy_so-repo-sync_sbin:
  file.copy:
    - name: /usr/sbin/so-repo-sync
    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
    - force: True
    - preserve: True

{% else %}
fix_23_soup_sbin:
  cmd.run:
    - name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
  cmd.run:
    - name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}

Added (new version of the file):
# Sync some Utilities
soup_scripts:
  file.recurse:
    - name: /usr/sbin
    - user: root
    - group: root
    - file_mode: 755
    - source: salt://common/tools/sbin
    - include_pat:
      - so-common
      - so-image-common

soup_manager_scripts:
  file.recurse:
    - name: /usr/sbin
    - user: root
    - group: root
    - file_mode: 755
    - source: salt://manager/tools/sbin
    - include_pat:
      - so-firewall
      - so-repo-sync
      - soup
@@ -5,13 +5,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

-cat << EOF
-
-so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
-https://docs.securityonion.net/en/2.4/salt.html
-
-EOF
-
-salt-call state.highstate -l info queue=True
+salt-call state.highstate -l info
@@ -31,11 +31,6 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
  export PATH="$PATH:/usr/sbin"
fi

-# See if a proxy is set. If so use it.
-if [ -f /etc/profile.d/so-proxy.sh ]; then
-  . /etc/profile.d/so-proxy.sh
-fi
-
# Define a banner to separate sections
banner="========================================================================="

@@ -184,21 +179,6 @@ copy_new_files() {
  cd /tmp
}

-create_local_directories() {
-  echo "Creating local pillar and salt directories if needed"
-  PILLARSALTDIR=$1
-  local_salt_dir="/opt/so/saltstack/local"
-  for i in "pillar" "salt"; do
-    for d in $(find $PILLARSALTDIR/$i -type d); do
-      suffixdir=${d//$PILLARSALTDIR/}
-      if [ ! -d "$local_salt_dir/$suffixdir" ]; then
-        mkdir -pv $local_salt_dir$suffixdir
-      fi
-    done
-    chown -R socore:socore $local_salt_dir/$i
-  done
-}
-
disable_fastestmirror() {
  sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}

@@ -268,14 +248,6 @@ get_random_value() {
  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}

-get_agent_count() {
-  if [ -f /opt/so/log/agents/agentstatus.log ]; then
-    AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
-  else
-    AGENTCOUNT=0
-  fi
-}
-
gpg_rpm_import() {
  if [[ $is_oracle ]]; then
    if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then

@@ -357,7 +329,7 @@ lookup_salt_value() {
    local=""
  fi

-  salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
+  salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}

lookup_pillar() {

@@ -394,13 +366,6 @@ is_feature_enabled() {
  return 1
}

-read_feat() {
-  if [ -f /opt/so/log/sostatus/lks_enabled ]; then
-    lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}')
-    echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)"
-  fi
-}
-
require_manager() {
  if is_manager_node; then
    echo "This is a manager, so we can proceed."

@@ -594,15 +559,6 @@ status () {
  printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}

-sync_options() {
-  set_version
-  set_os
-  salt_minion_count
-  get_agent_count
-
-  echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
-}
-
systemctl_func() {
  local action=$1
  local echo_action=$1
@@ -8,7 +8,6 @@
import sys
import subprocess
import os
-import json

sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/')
import salt.config

@@ -37,67 +36,17 @@ def check_needs_restarted():
        with open(outfile, 'w') as f:
            f.write(val)

-def check_for_fps():
-    feat = 'fps'
-    feat_full = feat.replace('ps', 'ips')
-    fps = 0
-    try:
-        result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
-        if result.returncode == 0:
-            fps = 1
-    except FileNotFoundError:
-        fn = '/proc/sys/crypto/' + feat_full + '_enabled'
-        try:
-            with open(fn, 'r') as f:
-                contents = f.read()
-                if '1' in contents:
-                    fps = 1
-        except:
-            # Unknown, so assume 0
-            fps = 0
-
-    with open('/opt/so/log/sostatus/fps_enabled', 'w') as f:
-        f.write(str(fps))
-
-def check_for_lks():
-    feat = 'Lks'
-    feat_full = feat.replace('ks', 'uks')
-    lks = 0
-    result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE)
-    data = json.loads(result.stdout)
-    for device in data['blockdevices']:
-        if 'children' in device:
-            for gc in device['children']:
-                if 'children' in gc:
-                    try:
-                        arg = 'is' + feat_full
-                        result = subprocess.run(['cryptsetup', arg, gc['name']], stdout=subprocess.PIPE)
-                        if result.returncode == 0:
-                            lks = 1
-                    except FileNotFoundError:
-                        for ggc in gc['children']:
-                            if 'crypt' in ggc['type']:
-                                lks = 1
-                    if lks:
-                        break
-    with open('/opt/so/log/sostatus/lks_enabled', 'w') as f:
-        f.write(str(lks))
-
def fail(msg):
    print(msg, file=sys.stderr)
    sys.exit(1)


def main():
    proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
    if proc.stdout.strip() != "0":
        fail("This program must be run as root")
-    # Ensure that umask is 0022 so that files created by this script have rw-r-r permissions
-    org_umask = os.umask(0o022)
    check_needs_restarted()
-    check_for_fps()
-    check_for_lks()
-    # Restore umask to whatever value was set before this script was run. SXIG sets to 0077 rw---
-    os.umask(org_umask)

if __name__ == "__main__":
    main()
@@ -50,14 +50,16 @@ container_list() {
    "so-idh"
    "so-idstools"
    "so-influxdb"
-   "so-kafka"
    "so-kibana"
    "so-kratos"
    "so-logstash"
+   "so-mysql"
    "so-nginx"
    "so-pcaptools"
+   "so-playbook"
    "so-redis"
    "so-soc"
+   "so-soctopus"
    "so-steno"
    "so-strelka-backend"
    "so-strelka-filestream"
@@ -49,6 +49,10 @@ if [ "$CONTINUE" == "y" ]; then
    sed -i "s|$OLD_IP|$NEW_IP|g" $file
  done

+  echo "Granting MySQL root user permissions on $NEW_IP"
+  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
+  echo "Removing MySQL root user from $OLD_IP"
+  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
  echo "Updating Kibana dashboards"
  salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True
@@ -122,7 +122,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
fi

if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then

@@ -155,11 +154,15 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
+EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
+EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
+EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
+EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index

@@ -198,13 +201,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
-EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
fi

RESULT=0

@@ -213,9 +210,7 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging
-exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
-exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
-exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
+exclude_container so-playbook # ignore due to several playbook known issues

for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")

@@ -233,14 +228,10 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues
+exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed
-exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
-exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
-exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
-exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
-exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable

for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"
@@ -1,98 +0,0 @@ (entire file removed)
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0."

set -e
# This script is intended to be used in the case the ISO install did not properly setup TPM decrypt for LUKS partitions at boot.
if [ -z $NOROOT ]; then
  # Check for prerequisites
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
fi
ENROLL_TPM=N

while [[ $# -gt 0 ]]; do
  case $1 in
    --enroll-tpm)
      ENROLL_TPM=Y
      ;;
    *)
      echo "Usage: $0 [options]"
      echo ""
      echo "where options are:"
      echo " --enroll-tpm for when TPM enrollment was not selected during ISO install."
      echo ""
      exit 1
      ;;
  esac
  shift
done

check_for_tpm() {
  echo -n "Checking for TPM: "
  if [ -d /sys/class/tpm/tpm0 ]; then
    echo -e "tpm0 found."
    TPM="yes"
    # Check if TPM is using sha1 or sha256
    if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then
      echo -e "TPM is using sha1.\n"
      TPM_PCR="sha1"
    elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then
      echo -e "TPM is using sha256.\n"
      TPM_PCR="sha256"
    fi
  else
    echo -e "No TPM found.\n"
    exit 1
  fi
}

check_for_luks_partitions() {
  echo "Checking for LUKS partitions"
  for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do
    echo "Found LUKS partition: $part"
    LUKS_PARTITIONS+=("$part")
  done
  if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then
    echo -e "No LUKS partitions found.\n"
    exit 1
  fi
  echo ""
}

enroll_tpm_in_luks() {
  read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE
  echo ""
  for part in "${LUKS_PARTITIONS[@]}"; do
    echo "Enrolling TPM for LUKS device: /dev/$part"
    if [ "$TPM_PCR" == "sha1" ]; then
      clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
    elif [ "$TPM_PCR" == "sha256" ]; then
      clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
    fi
  done
}

regenerate_tpm_enrollment_token() {
  for part in "${LUKS_PARTITIONS[@]}"; do
    clevis luks regen -d /dev/$part -s 1 -q
  done
}

check_for_tpm
check_for_luks_partitions

if [[ $ENROLL_TPM == "Y" ]]; then
  enroll_tpm_in_luks
else
  regenerate_tpm_enrollment_token
fi

echo "Running dracut"
dracut -fv
echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n"
@@ -10,7 +10,7 @@
|
|||||||
. /usr/sbin/so-common
|
. /usr/sbin/so-common
|
||||||
. /usr/sbin/so-image-common
|
. /usr/sbin/so-image-common
|
||||||
|
|
||||||
REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"}
|
REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
|
||||||
REPLAYSPEED=${REPLAYSPEED:-10}
|
REPLAYSPEED=${REPLAYSPEED:-10}
|
||||||
|
|
||||||
mkdir -p /opt/so/samples
|
mkdir -p /opt/so/samples
|
||||||
@@ -89,7 +89,6 @@ function suricata() {
|
|||||||
-v ${LOG_PATH}:/var/log/suricata/:rw \
|
-v ${LOG_PATH}:/var/log/suricata/:rw \
|
||||||
-v ${NSM_PATH}/:/nsm/:rw \
|
-v ${NSM_PATH}/:/nsm/:rw \
|
||||||
-v "$PCAP:/input.pcap:ro" \
|
-v "$PCAP:/input.pcap:ro" \
|
||||||
-v /dev/null:/nsm/suripcap:rw \
|
|
||||||
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
|
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
|
||||||
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
|
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
|
||||||
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
|
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
|
||||||
@@ -248,7 +247,7 @@ fi
|
|||||||
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
|
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
|
||||||
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
||||||
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
|
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
|
||||||
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
||||||
|
|
||||||
status "Import complete!"
|
status "Import complete!"
|
||||||
status
|
status
|
||||||
|
|||||||
@@ -334,7 +334,6 @@ desktop_packages:
|
|||||||
- pulseaudio-libs
|
- pulseaudio-libs
|
||||||
- pulseaudio-libs-glib2
|
- pulseaudio-libs-glib2
|
||||||
- pulseaudio-utils
|
- pulseaudio-utils
|
||||||
- putty
|
|
||||||
- sane-airscan
|
- sane-airscan
|
||||||
- sane-backends
|
- sane-backends
|
||||||
- sane-backends-drivers-cameras
|
- sane-backends-drivers-cameras
|
||||||
|
|||||||
@@ -67,6 +67,13 @@ docker:
|
|||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
|
'so-mysql':
|
||||||
|
final_octet: 30
|
||||||
|
port_bindings:
|
||||||
|
- 0.0.0.0:3306:3306
|
||||||
|
custom_bind_mounts: []
|
||||||
|
extra_hosts: []
|
||||||
|
extra_env: []
|
||||||
'so-nginx':
|
'so-nginx':
|
||||||
final_octet: 31
|
final_octet: 31
|
||||||
port_bindings:
|
port_bindings:
|
||||||
@@ -77,10 +84,10 @@ docker:
|
|||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
'so-nginx-fleet-node':
|
'so-playbook':
|
||||||
final_octet: 31
|
final_octet: 32
|
||||||
port_bindings:
|
port_bindings:
|
||||||
- 8443:8443
|
- 0.0.0.0:3000:3000
|
||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
@@ -104,6 +111,13 @@ docker:
|
|||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
|
'so-soctopus':
|
||||||
|
final_octet: 35
|
||||||
|
port_bindings:
|
||||||
|
- 0.0.0.0:7000:7000
|
||||||
|
custom_bind_mounts: []
|
||||||
|
extra_hosts: []
|
||||||
|
extra_env: []
|
||||||
'so-strelka-backend':
|
'so-strelka-backend':
|
||||||
final_octet: 36
|
final_octet: 36
|
||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
@@ -180,19 +194,8 @@ docker:
|
|||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
ulimits:
|
|
||||||
- memlock=524288000
|
|
||||||
'so-zeek':
|
'so-zeek':
|
||||||
final_octet: 99
|
final_octet: 99
|
||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
'so-kafka':
|
|
||||||
final_octet: 88
|
|
||||||
port_bindings:
|
|
||||||
- 0.0.0.0:9092:9092
|
|
||||||
- 0.0.0.0:9093:9093
|
|
||||||
- 0.0.0.0:8778:8778
|
|
||||||
custom_bind_mounts: []
|
|
||||||
extra_hosts: []
|
|
||||||
extra_env: []
|
|
||||||
|
|||||||
@@ -20,30 +20,30 @@ dockergroup:
|
|||||||
dockerheldpackages:
|
dockerheldpackages:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- pkgs:
|
- pkgs:
|
||||||
- containerd.io: 1.6.33-1
|
- containerd.io: 1.6.21-1
|
||||||
- docker-ce: 5:26.1.4-1~debian.12~bookworm
|
- docker-ce: 5:24.0.3-1~debian.12~bookworm
|
||||||
- docker-ce-cli: 5:26.1.4-1~debian.12~bookworm
|
- docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
|
||||||
- docker-ce-rootless-extras: 5:26.1.4-1~debian.12~bookworm
|
- docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
|
||||||
- hold: True
|
- hold: True
|
||||||
- update_holds: True
|
- update_holds: True
|
||||||
{% elif grains.oscodename == 'jammy' %}
|
{% elif grains.oscodename == 'jammy' %}
|
||||||
dockerheldpackages:
|
dockerheldpackages:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- pkgs:
|
- pkgs:
|
||||||
- containerd.io: 1.6.33-1
|
- containerd.io: 1.6.21-1
|
||||||
- docker-ce: 5:26.1.4-1~ubuntu.22.04~jammy
|
- docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
|
||||||
- docker-ce-cli: 5:26.1.4-1~ubuntu.22.04~jammy
|
- docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
|
||||||
- docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.22.04~jammy
|
- docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
|
||||||
- hold: True
|
- hold: True
|
||||||
- update_holds: True
|
- update_holds: True
|
||||||
{% else %}
|
{% else %}
|
||||||
dockerheldpackages:
|
dockerheldpackages:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- pkgs:
|
- pkgs:
|
||||||
- containerd.io: 1.6.33-1
|
- containerd.io: 1.4.9-1
|
||||||
- docker-ce: 5:26.1.4-1~ubuntu.20.04~focal
|
- docker-ce: 5:20.10.8~3-0~ubuntu-focal
|
||||||
- docker-ce-cli: 5:26.1.4-1~ubuntu.20.04~focal
|
- docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
|
||||||
- docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.20.04~focal
|
- docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
|
||||||
- hold: True
|
- hold: True
|
||||||
- update_holds: True
|
- update_holds: True
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -51,10 +51,10 @@ dockerheldpackages:
|
|||||||
dockerheldpackages:
|
dockerheldpackages:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- pkgs:
|
- pkgs:
|
||||||
- containerd.io: 1.6.33-3.1.el9
|
- containerd.io: 1.6.21-3.1.el9
|
||||||
- docker-ce: 3:26.1.4-1.el9
|
- docker-ce: 24.0.4-1.el9
|
||||||
- docker-ce-cli: 1:26.1.4-1.el9
|
- docker-ce-cli: 24.0.4-1.el9
|
||||||
- docker-ce-rootless-extras: 26.1.4-1.el9
|
- docker-ce-rootless-extras: 24.0.4-1.el9
|
||||||
- hold: True
|
- hold: True
|
||||||
- update_holds: True
|
- update_holds: True
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|||||||
@@ -46,11 +46,13 @@ docker:
|
|||||||
so-kibana: *dockerOptions
|
so-kibana: *dockerOptions
|
||||||
so-kratos: *dockerOptions
|
so-kratos: *dockerOptions
|
||||||
so-logstash: *dockerOptions
|
so-logstash: *dockerOptions
|
||||||
|
so-mysql: *dockerOptions
|
||||||
so-nginx: *dockerOptions
|
so-nginx: *dockerOptions
|
||||||
so-nginx-fleet-node: *dockerOptions
|
so-playbook: *dockerOptions
|
||||||
so-redis: *dockerOptions
|
so-redis: *dockerOptions
|
||||||
so-sensoroni: *dockerOptions
|
so-sensoroni: *dockerOptions
|
||||||
so-soc: *dockerOptions
|
so-soc: *dockerOptions
|
||||||
|
so-soctopus: *dockerOptions
|
||||||
so-strelka-backend: *dockerOptions
|
so-strelka-backend: *dockerOptions
|
||||||
so-strelka-filestream: *dockerOptions
|
so-strelka-filestream: *dockerOptions
|
||||||
so-strelka-frontend: *dockerOptions
|
so-strelka-frontend: *dockerOptions
|
||||||
@@ -63,42 +65,5 @@ docker:
|
|||||||
so-elastic-agent: *dockerOptions
|
so-elastic-agent: *dockerOptions
|
||||||
so-telegraf: *dockerOptions
|
so-telegraf: *dockerOptions
|
||||||
so-steno: *dockerOptions
|
so-steno: *dockerOptions
|
||||||
so-suricata:
|
so-suricata: *dockerOptions
|
||||||
final_octet:
|
|
||||||
description: Last octet of the container IP address.
|
|
||||||
helpLink: docker.html
|
|
||||||
readonly: True
|
|
||||||
advanced: True
|
|
||||||
global: True
|
|
||||||
port_bindings:
|
|
||||||
description: List of port bindings for the container.
|
|
||||||
helpLink: docker.html
|
|
||||||
advanced: True
|
|
||||||
multiline: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
custom_bind_mounts:
|
|
||||||
description: List of custom local volume bindings.
|
|
||||||
advanced: True
|
|
||||||
helpLink: docker.html
|
|
||||||
multiline: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
extra_hosts:
|
|
||||||
description: List of additional host entries for the container.
|
|
||||||
advanced: True
|
|
||||||
helpLink: docker.html
|
|
||||||
multiline: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
extra_env:
|
|
||||||
description: List of additional ENV entries for the container.
|
|
||||||
advanced: True
|
|
||||||
helpLink: docker.html
|
|
||||||
multiline: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
ulimits:
|
|
||||||
description: Ulimits for the container, in bytes.
|
|
||||||
advanced: True
|
|
||||||
helpLink: docker.html
|
|
||||||
multiline: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
so-zeek: *dockerOptions
|
so-zeek: *dockerOptions
|
||||||
so-kafka: *dockerOptions
|
|
||||||
@@ -82,36 +82,6 @@ elastasomodulesync:
|
|||||||
- group: 933
|
- group: 933
|
||||||
- makedirs: True
|
- makedirs: True
|
||||||
|
|
||||||
elastacustomdir:
|
|
||||||
file.directory:
|
|
||||||
- name: /opt/so/conf/elastalert/custom
|
|
||||||
- user: 933
|
|
||||||
- group: 933
|
|
||||||
- makedirs: True
|
|
||||||
|
|
||||||
elastacustomsync:
|
|
||||||
file.recurse:
|
|
||||||
- name: /opt/so/conf/elastalert/custom
|
|
||||||
- source: salt://elastalert/files/custom
|
|
||||||
- user: 933
|
|
||||||
- group: 933
|
|
||||||
- makedirs: True
|
|
||||||
- file_mode: 660
|
|
||||||
- show_changes: False
|
|
||||||
|
|
||||||
elastapredefinedsync:
|
|
||||||
file.recurse:
|
|
||||||
- name: /opt/so/conf/elastalert/predefined
|
|
||||||
- source: salt://elastalert/files/predefined
|
|
||||||
- user: 933
|
|
||||||
- group: 933
|
|
||||||
- makedirs: True
|
|
||||||
- template: jinja
|
|
||||||
- file_mode: 660
|
|
||||||
- context:
|
|
||||||
elastalert: {{ ELASTALERTMERGED }}
|
|
||||||
- show_changes: False
|
|
||||||
|
|
||||||
elastaconf:
|
elastaconf:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /opt/so/conf/elastalert/elastalert_config.yaml
|
- name: /opt/so/conf/elastalert/elastalert_config.yaml
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
elastalert:
|
elastalert:
|
||||||
enabled: False
|
enabled: False
|
||||||
alerter_parameters: ""
|
|
||||||
config:
|
config:
|
||||||
rules_folder: /opt/elastalert/rules/
|
rules_folder: /opt/elastalert/rules/
|
||||||
scan_subdirectories: true
|
scan_subdirectories: true
|
||||||
|
|||||||
@@ -30,8 +30,6 @@ so-elastalert:
|
|||||||
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
|
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
|
||||||
- /opt/so/log/elastalert:/var/log/elastalert:rw
|
- /opt/so/log/elastalert:/var/log/elastalert:rw
|
||||||
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
|
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
|
||||||
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
|
|
||||||
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
|
|
||||||
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
|
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
|
||||||
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||||
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
THIS IS A PLACEHOLDER FILE
|
|
||||||
38
salt/elastalert/files/modules/so/playbook-es.py
Normal file
38
salt/elastalert/files/modules/so/playbook-es.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
|
||||||
|
from time import gmtime, strftime
|
||||||
|
import requests,json
|
||||||
|
from elastalert.alerts import Alerter
|
||||||
|
|
||||||
|
import urllib3
|
||||||
|
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||||
|
|
||||||
|
class PlaybookESAlerter(Alerter):
|
||||||
|
"""
|
||||||
|
Use matched data to create alerts in elasticsearch
|
||||||
|
"""
|
||||||
|
|
||||||
|
required_options = set(['play_title','play_url','sigma_level'])
|
||||||
|
|
||||||
|
def alert(self, matches):
|
||||||
|
for match in matches:
|
||||||
|
today = strftime("%Y.%m.%d", gmtime())
|
||||||
|
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
|
||||||
|
headers = {"Content-Type": "application/json"}
|
||||||
|
|
||||||
|
creds = None
|
||||||
|
if 'es_username' in self.rule and 'es_password' in self.rule:
|
||||||
|
creds = (self.rule['es_username'], self.rule['es_password'])
|
||||||
|
|
||||||
|
payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
|
||||||
|
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
|
||||||
|
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
|
||||||
|
|
||||||
|
def get_info(self):
|
||||||
|
return {'type': 'PlaybookESAlerter'}
|
||||||
@@ -1,63 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
|
||||||
# Elastic License 2.0.
|
|
||||||
|
|
||||||
|
|
||||||
from time import gmtime, strftime
|
|
||||||
import requests,json
|
|
||||||
from elastalert.alerts import Alerter
|
|
||||||
|
|
||||||
import urllib3
|
|
||||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
|
||||||
|
|
||||||
class SecurityOnionESAlerter(Alerter):
|
|
||||||
"""
|
|
||||||
Use matched data to create alerts in Elasticsearch.
|
|
||||||
"""
|
|
||||||
|
|
||||||
required_options = set(['detection_title', 'sigma_level'])
|
|
||||||
optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']
|
|
||||||
|
|
||||||
def alert(self, matches):
|
|
||||||
for match in matches:
|
|
||||||
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
|
|
||||||
headers = {"Content-Type": "application/json"}
|
|
||||||
|
|
||||||
creds = None
|
|
||||||
if 'es_username' in self.rule and 'es_password' in self.rule:
|
|
||||||
creds = (self.rule['es_username'], self.rule['es_password'])
|
|
||||||
|
|
||||||
# Start building the rule dict
|
|
||||||
rule_info = {
|
|
||||||
"name": self.rule['detection_title'],
|
|
||||||
"uuid": self.rule['detection_public_id']
|
|
||||||
}
|
|
||||||
|
|
||||||
# Add optional fields if they are present in the rule
|
|
||||||
for field in self.optional_fields:
|
|
||||||
rule_key = field.split('_')[-1] # Assumes field format "sigma_<key>"
|
|
||||||
if field in self.rule:
|
|
||||||
rule_info[rule_key] = self.rule[field]
|
|
||||||
|
|
||||||
# Construct the payload with the conditional rule_info
|
|
||||||
payload = {
|
|
||||||
"tags": "alert",
|
|
||||||
"rule": rule_info,
|
|
||||||
"event": {
|
|
||||||
"severity": self.rule['event.severity'],
|
|
||||||
"module": self.rule['event.module'],
|
|
||||||
"dataset": self.rule['event.dataset'],
|
|
||||||
"severity_label": self.rule['sigma_level']
|
|
||||||
},
|
|
||||||
"sigma_level": self.rule['sigma_level'],
|
|
||||||
"event_data": match,
|
|
||||||
"@timestamp": timestamp
|
|
||||||
}
|
|
||||||
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/"
|
|
||||||
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
|
|
||||||
|
|
||||||
def get_info(self):
|
|
||||||
return {'type': 'SecurityOnionESAlerter'}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %}
|
|
||||||
user: {{ elastalert.jira_user }}
|
|
||||||
password: {{ elastalert.jira_pass }}
|
|
||||||
{% else %}
|
|
||||||
apikey: {{ elastalert.get('jira_api_key', '') }}
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
user: {{ elastalert.get('smtp_user', '') }}
|
|
||||||
password: {{ elastalert.get('smtp_pass', '') }}
|
|
||||||
@@ -13,19 +13,3 @@
|
|||||||
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
|
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
|
||||||
|
|
||||||
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
|
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
|
||||||
|
|
||||||
{% if 'ntf' in salt['pillar.get']('features', []) %}
|
|
||||||
{% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %}
|
|
||||||
{% if params != None and params | length > 0 %}
|
|
||||||
{% do ELASTALERTMERGED.config.update(params) %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %}
|
|
||||||
{% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %}
|
|
||||||
{% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% endif %}
|
|
||||||
|
|||||||
@@ -2,99 +2,6 @@ elastalert:
|
|||||||
enabled:
|
enabled:
|
||||||
description: You can enable or disable Elastalert.
|
description: You can enable or disable Elastalert.
|
||||||
helpLink: elastalert.html
|
helpLink: elastalert.html
|
||||||
alerter_parameters:
|
|
||||||
title: Alerter Parameters
|
|
||||||
description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
multiline: True
|
|
||||||
syntax: yaml
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
jira_api_key:
|
|
||||||
title: Jira API Key
|
|
||||||
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
sensitive: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
jira_pass:
|
|
||||||
title: Jira Password
|
|
||||||
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
sensitive: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
jira_user:
|
|
||||||
title: Jira Username
|
|
||||||
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
smtp_pass:
|
|
||||||
title: SMTP Password
|
|
||||||
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
sensitive: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
smtp_user:
|
|
||||||
title: SMTP Username
|
|
||||||
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
forcedType: string
|
|
||||||
files:
|
|
||||||
custom:
|
|
||||||
alertmanager_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
gelf_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
http_post_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
http_post2_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
ms_teams_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
pagerduty_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
rocket_chat_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the rocket_chart_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
smtp__crt:
|
|
||||||
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
smtp__key:
|
|
||||||
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
slack_ca__crt:
|
|
||||||
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
|
||||||
global: True
|
|
||||||
file: True
|
|
||||||
helpLink: elastalert.html
|
|
||||||
config:
|
config:
|
||||||
disable_rules_on_error:
|
disable_rules_on_error:
|
||||||
description: Disable rules on failure.
|
description: Disable rules on failure.
|
||||||
|
|||||||
@@ -37,7 +37,6 @@ elasticfleet:
|
|||||||
- azure
|
- azure
|
||||||
- barracuda
|
- barracuda
|
||||||
- carbonblack_edr
|
- carbonblack_edr
|
||||||
- cef
|
|
||||||
- checkpoint
|
- checkpoint
|
||||||
- cisco_asa
|
- cisco_asa
|
||||||
- cisco_duo
|
- cisco_duo
|
||||||
@@ -46,8 +45,6 @@ elasticfleet:
|
|||||||
- cisco_ise
|
- cisco_ise
|
||||||
- cisco_meraki
|
- cisco_meraki
|
||||||
- cisco_umbrella
|
- cisco_umbrella
|
||||||
- citrix_adc
|
|
||||||
- citrix_waf
|
|
||||||
- cloudflare
|
- cloudflare
|
||||||
- crowdstrike
|
- crowdstrike
|
||||||
- darktrace
|
- darktrace
|
||||||
@@ -66,7 +63,6 @@ elasticfleet:
|
|||||||
- http_endpoint
|
- http_endpoint
|
||||||
- httpjson
|
- httpjson
|
||||||
- iis
|
- iis
|
||||||
- journald
|
|
||||||
- juniper
|
- juniper
|
||||||
- juniper_srx
|
- juniper_srx
|
||||||
- kafka_log
|
- kafka_log
|
||||||
@@ -79,7 +75,6 @@ elasticfleet:
|
|||||||
- mimecast
|
- mimecast
|
||||||
- mysql
|
- mysql
|
||||||
- netflow
|
- netflow
|
||||||
- nginx
|
|
||||||
- o365
|
- o365
|
||||||
- okta
|
- okta
|
||||||
- osquery_manager
|
- osquery_manager
|
||||||
@@ -108,7 +103,6 @@ elasticfleet:
|
|||||||
- udp
|
- udp
|
||||||
- vsphere
|
- vsphere
|
||||||
- windows
|
- windows
|
||||||
- winlog
|
|
||||||
- zscaler_zia
|
- zscaler_zia
|
||||||
- zscaler_zpa
|
- zscaler_zpa
|
||||||
- 1password
|
- 1password
|
||||||
@@ -119,8 +113,3 @@ elasticfleet:
|
|||||||
base_url: https://api.platform.sublimesecurity.com
|
base_url: https://api.platform.sublimesecurity.com
|
||||||
poll_interval: 5m
|
poll_interval: 5m
|
||||||
limit: 100
|
limit: 100
|
||||||
kismet:
|
|
||||||
base_url: http://localhost:2501
|
|
||||||
poll_interval: 1m
|
|
||||||
api_key:
|
|
||||||
enabled_nodes: []
|
|
||||||
|
|||||||
@@ -17,19 +17,12 @@ include:
|
|||||||
- elasticfleet.sostatus
|
- elasticfleet.sostatus
|
||||||
- ssl
|
- ssl
|
||||||
|
|
||||||
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
|
|
||||||
wait_for_elasticsearch_elasticfleet:
|
|
||||||
cmd.run:
|
|
||||||
- name: so-elasticsearch-wait
|
|
||||||
|
|
||||||
# If enabled, automatically update Fleet Logstash Outputs
|
# If enabled, automatically update Fleet Logstash Outputs
|
||||||
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
|
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
|
||||||
so-elastic-fleet-auto-configure-logstash-outputs:
|
so-elastic-fleet-auto-configure-logstash-outputs:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-outputs-update
|
- name: /usr/sbin/so-elastic-fleet-outputs-update
|
||||||
- retry:
|
- retry: True
|
||||||
attempts: 4
|
|
||||||
interval: 30
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# If enabled, automatically update Fleet Server URLs & ES Connection
|
# If enabled, automatically update Fleet Server URLs & ES Connection
|
||||||
@@ -37,35 +30,15 @@ so-elastic-fleet-auto-configure-logstash-outputs:
|
|||||||
so-elastic-fleet-auto-configure-server-urls:
|
so-elastic-fleet-auto-configure-server-urls:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-urls-update
|
- name: /usr/sbin/so-elastic-fleet-urls-update
|
||||||
- retry:
|
- retry: True
|
||||||
attempts: 4
|
|
||||||
interval: 30
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
|
# Automatically update Fleet Server Elasticsearch URLs
|
||||||
{% if grains.role not in ['so-fleet'] %}
|
{% if grains.role not in ['so-fleet'] %}
|
||||||
so-elastic-fleet-auto-configure-elasticsearch-urls:
|
so-elastic-fleet-auto-configure-elasticsearch-urls:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-es-url-update
|
- name: /usr/sbin/so-elastic-fleet-es-url-update
|
||||||
- retry:
|
- retry: True
|
||||||
attempts: 4
|
|
||||||
interval: 30
|
|
||||||
|
|
||||||
so-elastic-fleet-auto-configure-artifact-urls:
|
|
||||||
cmd.run:
|
|
||||||
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
|
|
||||||
- retry:
|
|
||||||
attempts: 4
|
|
||||||
interval: 30
|
|
||||||
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Sync Elastic Agent artifacts to Fleet Node
|
|
||||||
{% if grains.role in ['so-fleet'] %}
|
|
||||||
elasticagent_syncartifacts:
|
|
||||||
file.recurse:
|
|
||||||
- name: /nsm/elastic-fleet/artifacts/beats
|
|
||||||
- source: salt://beats
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if SERVICETOKEN != '' %}
|
{% if SERVICETOKEN != '' %}
|
||||||
|
|||||||
@@ -1,36 +0,0 @@
|
|||||||
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
|
||||||
{% raw %}
|
|
||||||
{
|
|
||||||
"package": {
|
|
||||||
"name": "httpjson",
|
|
||||||
"version": ""
|
|
||||||
},
|
|
||||||
"name": "kismet-logs",
|
|
||||||
"namespace": "so",
|
|
||||||
"description": "Kismet Logs",
|
|
||||||
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
|
|
||||||
"inputs": {
|
|
||||||
"generic-httpjson": {
|
|
||||||
"enabled": true,
|
|
||||||
"streams": {
|
|
||||||
"httpjson.generic": {
|
|
||||||
"enabled": true,
|
|
||||||
"vars": {
|
|
||||||
"data_stream.dataset": "kismet",
|
|
||||||
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
|
|
||||||
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
|
|
||||||
"request_method": "GET",
|
|
||||||
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
|
|
||||||
"request_redirect_headers_ban_list": [],
|
|
||||||
"oauth_scopes": [],
|
|
||||||
"processors": "",
|
|
||||||
"tags": [],
|
|
||||||
"pipeline": "kismet.common"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"force": true
|
|
||||||
}
|
|
||||||
{% endraw %}
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
{
|
|
||||||
"package": {
|
|
||||||
"name": "winlog",
|
|
||||||
"version": ""
|
|
||||||
},
|
|
||||||
"name": "windows-defender",
|
|
||||||
"namespace": "default",
|
|
||||||
"description": "Windows Defender - Operational logs",
|
|
||||||
"policy_id": "endpoints-initial",
|
|
||||||
"inputs": {
|
|
||||||
"winlogs-winlog": {
|
|
||||||
"enabled": true,
|
|
||||||
"streams": {
|
|
||||||
"winlog.winlog": {
|
|
||||||
"enabled": true,
|
|
||||||
"vars": {
|
|
||||||
"channel": "Microsoft-Windows-Windows Defender/Operational",
|
|
||||||
"data_stream.dataset": "winlog.winlog",
|
|
||||||
"preserve_original_event": false,
|
|
||||||
"providers": [],
|
|
||||||
"ignore_older": "72h",
|
|
||||||
"language": 0,
|
|
||||||
"tags": [] }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"force": true
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
{
|
|
||||||
"package": {
|
|
||||||
"name": "log",
|
|
||||||
"version": ""
|
|
||||||
},
|
|
||||||
"name": "rita-logs",
|
|
||||||
"namespace": "so",
|
|
||||||
"description": "RITA Logs",
|
|
||||||
"policy_id": "so-grid-nodes_general",
|
|
||||||
"vars": {},
|
|
||||||
"inputs": {
|
|
||||||
"logs-logfile": {
|
|
||||||
"enabled": true,
|
|
||||||
"streams": {
|
|
||||||
"log.logs": {
|
|
||||||
"enabled": true,
|
|
||||||
"vars": {
|
|
||||||
"paths": [
|
|
||||||
"/nsm/rita/beacons.csv",
|
|
||||||
"/nsm/rita/exploded-dns.csv",
|
|
||||||
"/nsm/rita/long-connections.csv"
|
|
||||||
],
|
|
||||||
"exclude_files": [],
|
|
||||||
"ignore_older": "72h",
|
|
||||||
"data_stream.dataset": "rita",
|
|
||||||
"tags": [],
|
|
||||||
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
|
|
||||||
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
{
|
|
||||||
"policy_id": "so-grid-nodes_general",
|
|
||||||
"package": {
|
|
||||||
"name": "log",
|
|
||||||
"version": ""
|
|
||||||
},
|
|
||||||
"name": "soc-detections-logs",
|
|
||||||
"description": "Security Onion Console - Detections Logs",
|
|
||||||
"namespace": "so",
|
|
||||||
"inputs": {
|
|
||||||
"logs-logfile": {
|
|
||||||
"enabled": true,
|
|
||||||
"streams": {
|
|
||||||
"log.logs": {
|
|
||||||
"enabled": true,
|
|
||||||
"vars": {
|
|
||||||
"paths": [
|
|
||||||
"/opt/so/log/soc/detections_runtime-status_sigma.log",
|
|
||||||
"/opt/so/log/soc/detections_runtime-status_yara.log"
|
|
||||||
],
|
|
||||||
"exclude_files": [],
|
|
||||||
"ignore_older": "72h",
|
|
||||||
"data_stream.dataset": "soc",
|
|
||||||
"tags": [
|
|
||||||
"so-soc"
|
|
||||||
],
|
|
||||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
|
|
||||||
"custom": "pipeline: common"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"force": true
|
|
||||||
}
|
|
||||||
@@ -16,9 +16,6 @@
|
|||||||
"paths": [
|
"paths": [
|
||||||
"/var/log/auth.log*",
|
"/var/log/auth.log*",
|
||||||
"/var/log/secure*"
|
"/var/log/secure*"
|
||||||
],
|
|
||||||
"tags": [
|
|
||||||
"so-grid-node"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -28,9 +25,6 @@
|
|||||||
"paths": [
|
"paths": [
|
||||||
"/var/log/messages*",
|
"/var/log/messages*",
|
||||||
"/var/log/syslog*"
|
"/var/log/syslog*"
|
||||||
],
|
|
||||||
"tags": [
|
|
||||||
"so-grid-node"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,9 +16,6 @@
|
|||||||
"paths": [
|
"paths": [
|
||||||
"/var/log/auth.log*",
|
"/var/log/auth.log*",
|
||||||
"/var/log/secure*"
|
"/var/log/secure*"
|
||||||
],
|
|
||||||
"tags": [
|
|
||||||
"so-grid-node"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -28,9 +25,6 @@
|
|||||||
"paths": [
|
"paths": [
|
||||||
"/var/log/messages*",
|
"/var/log/messages*",
|
||||||
"/var/log/syslog*"
|
"/var/log/syslog*"
|
||||||
],
|
|
||||||
"tags": [
|
|
||||||
"so-grid-node"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -79,29 +79,3 @@ elasticfleet:
|
|||||||
helpLink: elastic-fleet.html
|
helpLink: elastic-fleet.html
|
||||||
advanced: True
|
advanced: True
|
||||||
forcedType: int
|
forcedType: int
|
||||||
kismet:
|
|
||||||
base_url:
|
|
||||||
description: Base URL for Kismet.
|
|
||||||
global: True
|
|
||||||
helpLink: elastic-fleet.html
|
|
||||||
advanced: True
|
|
||||||
forcedType: string
|
|
||||||
poll_interval:
|
|
||||||
description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
|
|
||||||
global: True
|
|
||||||
helpLink: elastic-fleet.html
|
|
||||||
advanced: True
|
|
||||||
forcedType: string
|
|
||||||
api_key:
|
|
||||||
description: API key for Kismet.
|
|
||||||
global: True
|
|
||||||
helpLink: elastic-fleet.html
|
|
||||||
advanced: True
|
|
||||||
forcedType: string
|
|
||||||
sensitive: True
|
|
||||||
enabled_nodes:
|
|
||||||
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
|
|
||||||
global: True
|
|
||||||
helpLink: elastic-fleet.html
|
|
||||||
advanced: True
|
|
||||||
forcedType: "[]string"
|
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
|
|||||||
|
|
||||||
for i in {1..30}
|
for i in {1..30}
|
||||||
do
|
do
|
||||||
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
|
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
|
||||||
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
|
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
|
||||||
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
|
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
|
||||||
done
|
done
|
||||||
@@ -46,7 +46,7 @@ do
|
|||||||
done
|
done
|
||||||
|
|
||||||
printf "\n### Stripping out unused components"
|
printf "\n### Stripping out unused components"
|
||||||
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -delete
|
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
|
||||||
|
|
||||||
printf "\n### Tarring everything up again"
|
printf "\n### Tarring everything up again"
|
||||||
for OS in "${OSARCH[@]}"
|
for OS in "${OSARCH[@]}"
|
||||||
@@ -72,5 +72,5 @@ do
|
|||||||
printf "\n### $GOOS/$GOARCH Installer Generated...\n"
|
printf "\n### $GOOS/$GOARCH Installer Generated...\n"
|
||||||
done
|
done
|
||||||
|
|
||||||
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
|
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
|
||||||
rm -rf /nsm/elastic-agent-workspace
|
rm -rf /nsm/elastic-agent-workspace
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||||
# this file except in compliance with the Elastic License 2.0.
|
# this file except in compliance with the Elastic License 2.0.
|
||||||
|
|||||||
@@ -1,90 +0,0 @@
|
|||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
|
||||||
# this file except in compliance with the Elastic License 2.0.
|
|
||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
|
||||||
|
|
||||||
. /usr/sbin/so-common
|
|
||||||
|
|
||||||
# Only run on Managers
|
|
||||||
if ! is_manager_node; then
|
|
||||||
printf "Not a Manager Node... Exiting"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Function to check if an array contains a value
|
|
||||||
array_contains () {
|
|
||||||
local array="$1[@]"
|
|
||||||
local seeking=$2
|
|
||||||
local in=1
|
|
||||||
for element in "${!array}"; do
|
|
||||||
if [[ $element == "$seeking" ]]; then
|
|
||||||
in=0
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
return $in
|
|
||||||
}
|
|
||||||
|
|
||||||
# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
|
|
||||||
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'
|
|
||||||
|
|
||||||
# Initialize an array for new hosts from Fleet Nodes
|
|
||||||
declare -a NEW_LIST=()
|
|
||||||
|
|
||||||
# Query for Fleet Nodes & add them to the list (Hostname)
|
|
||||||
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
|
|
||||||
readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
|
|
||||||
for NODE in "${FLEETNODES[@]}"; do
|
|
||||||
URL="http://$NODE:8443/artifacts/"
|
|
||||||
NAME="FleetServer_$NODE"
|
|
||||||
NEW_LIST+=("$URL=$NAME")
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Create an array for expected hosts and their names
|
|
||||||
declare -A expected_urls=(
|
|
||||||
["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
|
|
||||||
["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Merge NEW_LIST into expected_urls
|
|
||||||
for entry in "${NEW_LIST[@]}"; do
|
|
||||||
# Extract URL and Name from each entry
|
|
||||||
IFS='=' read -r URL NAME <<< "$entry"
|
|
||||||
# Add to expected_urls, automatically handling URL as key and NAME as value
|
|
||||||
expected_urls["$URL"]="$NAME"
|
|
||||||
done
|
|
||||||
|
|
||||||
# Fetch the current hosts from the API
|
|
||||||
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)
|
|
||||||
|
|
||||||
# Convert current hosts to an array
|
|
||||||
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"
|
|
||||||
|
|
||||||
# Flag to track if any host was added
|
|
||||||
any_url_added=0
|
|
||||||
|
|
||||||
# Check each expected host
|
|
||||||
for host in "${!expected_urls[@]}"; do
|
|
||||||
array_contains current_urls_array "$host" || {
|
|
||||||
echo "$host (${expected_urls[$host]}) is missing. Adding it..."
|
|
||||||
|
|
||||||
# Prepare the JSON payload
|
|
||||||
JSON_STRING=$( jq -n \
|
|
||||||
--arg NAME "${expected_urls[$host]}" \
|
|
||||||
--arg URL "$host" \
|
|
||||||
'{"name":$NAME,"host":$URL}' )
|
|
||||||
|
|
||||||
# Create the missing host
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
|
||||||
|
|
||||||
# Flag that an artifact URL was added
|
|
||||||
any_url_added=1
|
|
||||||
}
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
|
|
||||||
if [[ $any_url_added -eq 0 ]]; then
|
|
||||||
echo "All expected artifact URLs are present. No updates needed."
|
|
||||||
fi
|
|
||||||
@@ -1,5 +1,3 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||||
# this file except in compliance with the Elastic License 2.0.
|
# this file except in compliance with the Elastic License 2.0.
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||||
# this file except in compliance with the Elastic License 2.0.
|
# this file except in compliance with the Elastic License 2.0.
|
||||||
@@ -21,103 +19,63 @@ function update_logstash_outputs() {
|
|||||||
# Update Logstash Outputs
|
# Update Logstash Outputs
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
||||||
}
|
}
|
||||||
function update_kafka_outputs() {
|
|
||||||
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
|
|
||||||
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
|
|
||||||
|
|
||||||
JSON_STRING=$(jq -n \
|
# Get current list of Logstash Outputs
|
||||||
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
|
||||||
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
|
||||||
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
|
|
||||||
# Update Kafka outputs
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
|
||||||
}
|
|
||||||
|
|
||||||
{% if GLOBALS.pipeline == "KAFKA" %}
|
# Check to make sure that the server responded with good data - else, bail from script
|
||||||
# Get current list of Kafka Outputs
|
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
|
||||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
|
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
|
||||||
|
|
||||||
# Check to make sure that the server responded with good data - else, bail from script
|
|
||||||
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
|
|
||||||
if [ "$CHECKSUM" != "so-manager_kafka" ]; then
|
|
||||||
printf "Failed to query for current Kafka Outputs..."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get the current list of kafka outputs & hash them
|
|
||||||
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
|
|
||||||
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
|
|
||||||
|
|
||||||
declare -a NEW_LIST=()
|
|
||||||
|
|
||||||
# Query for the current Grid Nodes that are running kafka
|
|
||||||
KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
|
|
||||||
|
|
||||||
# Query for Kafka nodes with Broker role and add hostname to list
|
|
||||||
while IFS= read -r line; do
|
|
||||||
NEW_LIST+=("$line")
|
|
||||||
done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
|
|
||||||
|
|
||||||
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
|
|
||||||
{% else %}
|
|
||||||
# Get current list of Logstash Outputs
|
|
||||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
|
|
||||||
|
|
||||||
# Check to make sure that the server responded with good data - else, bail from script
|
|
||||||
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
|
|
||||||
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
|
|
||||||
printf "Failed to query for current Logstash Outputs..."
|
printf "Failed to query for current Logstash Outputs..."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Get the current list of Logstash outputs & hash them
|
# Get the current list of Logstash outputs & hash them
|
||||||
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
|
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
|
||||||
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
|
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
|
||||||
|
|
||||||
declare -a NEW_LIST=()
|
declare -a NEW_LIST=()
|
||||||
|
|
||||||
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
|
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
|
||||||
{% if ELASTICFLEETMERGED.enable_manager_output %}
|
{% if ELASTICFLEETMERGED.enable_manager_output %}
|
||||||
# Create array & add initial elements
|
# Create array & add initial elements
|
||||||
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
|
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
|
||||||
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
|
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
|
||||||
else
|
else
|
||||||
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
|
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
|
||||||
fi
|
fi
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Query for FQDN entries & add them to the list
|
# Query for FQDN entries & add them to the list
|
||||||
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
|
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
|
||||||
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
|
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
|
||||||
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
|
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
|
||||||
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
|
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
|
||||||
do
|
do
|
||||||
NEW_LIST+=("$CUSTOMNAME:5055")
|
NEW_LIST+=("$CUSTOMNAME:5055")
|
||||||
done
|
done
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Query for the current Grid Nodes that are running Logstash
|
# Query for the current Grid Nodes that are running Logstash
|
||||||
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
|
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
|
||||||
|
|
||||||
# Query for Receiver Nodes & add them to the list
|
# Query for Receiver Nodes & add them to the list
|
||||||
if grep -q "receiver" <<< $LOGSTASHNODES; then
|
if grep -q "receiver" <<< $LOGSTASHNODES; then
|
||||||
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
|
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
|
||||||
for NODE in "${RECEIVERNODES[@]}"
|
for NODE in "${RECEIVERNODES[@]}"
|
||||||
do
|
do
|
||||||
NEW_LIST+=("$NODE:5055")
|
NEW_LIST+=("$NODE:5055")
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Query for Fleet Nodes & add them to the list
|
# Query for Fleet Nodes & add them to the list
|
||||||
if grep -q "fleet" <<< $LOGSTASHNODES; then
|
if grep -q "fleet" <<< $LOGSTASHNODES; then
|
||||||
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
|
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
|
||||||
for NODE in "${FLEETNODES[@]}"
|
for NODE in "${FLEETNODES[@]}"
|
||||||
do
|
do
|
||||||
NEW_LIST+=("$NODE:5055")
|
NEW_LIST+=("$NODE:5055")
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Sort & hash the new list of Logstash Outputs
|
# Sort & hash the new list of Logstash Outputs
|
||||||
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
|
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
|
||||||
@@ -127,28 +85,9 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
 if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
     printf "\nHashes match - no update needed.\n"
     printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

-    # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
-    printf "Checking if the correct output policy is set as default\n"
-    OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
-    OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
-    if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
-        printf "Default output policy needs to be updated.\n"
-        {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
-        update_kafka_outputs
-        {%- else %}
-        update_logstash_outputs
-        {%- endif %}
-    else
-        printf "Default output policy is set - no update needed.\n"
-    fi
     exit 0
 else
     printf "\nHashes don't match - update needed.\n"
     printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
-    {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
-    update_kafka_outputs
-    {%- else %}
     update_logstash_outputs
-    {%- endif %}
 fi
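The update check above boils down to hashing the freshly built host list and comparing it to the stored hash. A minimal, self-contained sketch of that idea follows; the host names and the previous list are illustrative only.

# Minimal sketch of the change-detection idea above (host names are illustrative)
OLD_LIST_JSON='["manager:5055"]'
CURRENT_HASH=$(sha1sum <<< "$OLD_LIST_JSON" | awk '{print $1}')
NEW_LIST=("manager:5055" "receiver1:5055")
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
[ "$NEW_HASH" = "$CURRENT_HASH" ] && echo "no update needed" || echo "update needed"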
@@ -77,11 +77,6 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 printf "\n\n"
 {%- endif %}

-printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
-{% if grains.role not in ['so-import', 'so-eval'] %}
-/usr/sbin/so-kafka-fleet-output-policy
-{% endif %}
-
 # Add Manager Hostname & URL Base to Fleet Host URLs
 printf "\nAdd SO-Manager Fleet URL\n"
 if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
@@ -1,5 +1,3 @@
-#!/bin/bash
-
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
-
-. /usr/sbin/so-common
-
-# Check to make sure that Kibana API is up & ready
-RETURN_CODE=0
-wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
-RETURN_CODE=$?
-
-if [[ "$RETURN_CODE" != "0" ]]; then
-    printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
-    exit 1
-fi
-
-output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
-
-if ! echo "$output" | grep -q "so-manager_kafka"; then
-    KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
-    KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
-    KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
-    KAFKA_OUTPUT_VERSION="2.6.0"
-    JSON_STRING=$( jq -n \
-        --arg KAFKACRT "$KAFKACRT" \
-        --arg KAFKAKEY "$KAFKAKEY" \
-        --arg KAFKACA "$KAFKACA" \
-        --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
-        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
-        '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
-    )
-    curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
-    refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
-
-    if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
-        echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
-        exit 1
-    elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
-        echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
-    fi
-
-elif echo "$output" | grep -q "so-manager_kafka"; then
-    echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
-fi
-{% else %}
-echo -e "\nNo update required...\n"
-{% endif %}
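After a script like the one above runs, the presence of the Kafka output policy can be confirmed with the same Fleet API call the script itself uses. A hedged example follows; the curl.config path comes from the script, and the jq filter is only there to narrow the output.

curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" \
  | jq '.items[] | select(.id == "so-manager_kafka") | {id, type, is_default}'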
@@ -4,7 +4,7 @@
 # Elastic License 2.0.

 {% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 # Move our new CA over so Elastic and Logstash can use SSL with the internal CA
@@ -118,19 +118,6 @@ esingestconf:
     - user: 930
     - group: 939

-# Auto-generate Elasticsearch ingest node pipelines from pillar
-{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
-es_ingest_conf_{{pipeline}}:
-  file.managed:
-    - name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
-    - source: salt://elasticsearch/base-template.json.jinja
-    - defaults:
-        TEMPLATE_CONFIG: {{ config }}
-    - template: jinja
-    - onchanges_in:
-      - file: so-pipelines-reload
-{% endfor %}
-
 eslog4jfile:
   file.managed:
     - name: /opt/so/conf/elasticsearch/log4j2.properties
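The for-loop above renders one managed file under /opt/so/conf/elasticsearch/ingest/ for each pipeline defined in the elasticsearch pillar. A hedged way to check on the manager whether any pillar-defined pipelines were rendered (the "custom" name is only an example of the default keys):

ls /opt/so/conf/elasticsearch/ingest/ | grep -i custom || echo "no pillar-defined pipelines rendered"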
File diff suppressed because it is too large
@@ -200,15 +200,9 @@ so-elasticsearch-roles-load:
     - require:
       - docker_container: so-elasticsearch
       - file: elasticsearch_sbin_jinja

 {% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
-{% if ELASTICSEARCHMERGED.index_clean %}
-{% set ap = "present" %}
-{% else %}
-{% set ap = "absent" %}
-{% endif %}
 so-elasticsearch-indices-delete:
-  cron.{{ap}}:
+  cron.present:
     - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
     - identifier: so-elasticsearch-indices-delete
     - user: root
@@ -218,7 +212,6 @@ so-elasticsearch-indices-delete:
     - month: '*'
     - dayweek: '*'
 {% endif %}
-
 {% endif %}

 {% else %}
@@ -57,11 +57,10 @@
 { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
 { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
 { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
-{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
+{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
 { "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
 { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
-{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
-{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
+{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
 { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
 {%- endraw %}
 {%- if HIGHLANDER %}
@@ -80,11 +80,9 @@
 { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
 { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
 { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
-{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
+{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
 { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
 { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
-{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
-{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
 { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
 ],
 "on_failure": [
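A hedged way to exercise ingest logic like the processors above is the Elasticsearch _simulate API. The endpoint and curl.config usage mirror the other calls in this diff, but the trivial pipeline body and sample document below are made up for illustration, and it is an assumption that the shared curl.config is also valid for direct Elasticsearch queries on port 9200.

curl -sK /opt/so/conf/elasticsearch/curl.config -X POST "localhost:9200/_ingest/pipeline/_simulate?pretty" \
  -H 'Content-Type: application/json' -d '
{
  "pipeline": { "processors": [ { "set": { "field": "event.dataset", "value": "test.dataset" } } ] },
  "docs": [ { "_source": { "message": "sample" } } ]
}'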
@@ -1,10 +0,0 @@
-{
-  "processors": [
-    {
-      "rename": {
-        "field": "message2.kismet_device_base_macaddr",
-        "target_field": "network.wireless.bssid"
-      }
-    }
-  ]
-}
@@ -1,50 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked", "target_field": "network.wireless.ssid_cloaked", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null" } },
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid", "target_field": "network.wireless.ssid", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null" } },
-    { "set": { "field": "network.wireless.ssid", "value": "Hidden", "if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1" } },
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc", "target_field": "network.wireless.channel_utilization", "if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null" } },
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", "target_field": "network.wireless.bssid" } },
-    { "foreach": { "field": "message2.dot11_device.dot11_device_associated_client_map", "processor": { "append": { "field": "network.wireless.associated_clients", "value": "{{_ingest._key}}" } }, "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" } }
-  ]
-}
@@ -1,16 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "message2.kismet_device_base_macaddr", "target_field": "client.mac" } },
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", "target_field": "network.wireless.bssid" } }
-  ]
-}
@@ -1,29 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "message2.kismet_device_base_macaddr", "target_field": "client.mac" } },
-    { "rename": { "field": "message2.dot11_device.dot11_device_last_bssid", "target_field": "network.wireless.last_connected_bssid", "if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null" } },
-    { "foreach": { "field": "message2.dot11_device.dot11_device_client_map", "processor": { "append": { "field": "network.wireless.known_connected_bssid", "value": "{{_ingest._key}}" } }, "if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null" } }
-  ]
-}
@@ -1,159 +0,0 @@
-{
-  "processors": [
-    { "json": { "field": "message", "target_field": "message2" } },
-    { "date": { "field": "message2.kismet_device_base_mod_time", "formats": [ "epoch_second" ], "target_field": "@timestamp" } },
-    { "set": { "field": "event.category", "value": "network" } },
-    { "dissect": { "field": "message2.kismet_device_base_type", "pattern": "%{wifi} %{device_type}" } },
-    { "lowercase": { "field": "device_type" } },
-    { "set": { "field": "event.dataset", "value": "kismet.{{device_type}}" } },
-    { "set": { "field": "event.dataset", "value": "kismet.wds_ap", "if": "ctx?.device_type == 'wds ap'" } },
-    { "set": { "field": "event.dataset", "value": "kismet.ad_hoc", "if": "ctx?.device_type == 'ad-hoc'" } },
-    { "set": { "field": "event.module", "value": "kismet" } },
-    { "rename": { "field": "message2.kismet_device_base_packets_tx_total", "target_field": "source.packets" } },
-    { "rename": { "field": "message2.kismet_device_base_num_alerts", "target_field": "kismet.alerts.count" } },
-    { "rename": { "field": "message2.kismet_device_base_channel", "target_field": "network.wireless.channel", "if": "ctx?.message2?.kismet_device_base_channel != ''" } },
-    { "rename": { "field": "message2.kismet_device_base_frequency", "target_field": "network.wireless.frequency", "if": "ctx?.message2?.kismet_device_base_frequency != 0" } },
-    { "rename": { "field": "message2.kismet_device_base_last_time", "target_field": "kismet.last_seen" } },
-    { "date": { "field": "kismet.last_seen", "formats": [ "epoch_second" ], "target_field": "kismet.last_seen" } },
-    { "rename": { "field": "message2.kismet_device_base_first_time", "target_field": "kismet.first_seen" } },
-    { "date": { "field": "kismet.first_seen", "formats": [ "epoch_second" ], "target_field": "kismet.first_seen" } },
-    { "rename": { "field": "message2.kismet_device_base_seenby", "target_field": "kismet.seenby" } },
-    { "foreach": { "field": "kismet.seenby", "processor": { "pipeline": { "name": "kismet.seenby" } } } },
-    { "rename": { "field": "message2.kismet_device_base_manuf", "target_field": "device.manufacturer" } },
-    { "pipeline": { "name": "{{event.dataset}}" } },
-    { "remove": { "field": [ "message2", "message", "device_type", "wifi", "agent", "host", "event.created" ], "ignore_failure": true } }
-  ]
-}
@@ -1,9 +0,0 @@
-{
-  "processors": [
-    { "pipeline": { "name": "kismet.client" } }
-  ]
-}
@@ -1,52 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "_ingest._value.kismet_common_seenby_num_packets", "target_field": "_ingest._value.packets_seen", "ignore_missing": true } },
-    { "rename": { "field": "_ingest._value.kismet_common_seenby_uuid", "target_field": "_ingest._value.serial_number", "ignore_missing": true } },
-    { "rename": { "field": "_ingest._value.kismet_common_seenby_first_time", "target_field": "_ingest._value.first_seen", "ignore_missing": true } },
-    { "rename": { "field": "_ingest._value.kismet_common_seenby_last_time", "target_field": "_ingest._value.last_seen", "ignore_missing": true } },
-    { "date": { "field": "_ingest._value.first_seen", "formats": [ "epoch_second" ], "target_field": "_ingest._value.first_seen", "ignore_failure": true } },
-    { "date": { "field": "_ingest._value.last_seen", "formats": [ "epoch_second" ], "target_field": "_ingest._value.last_seen", "ignore_failure": true } }
-  ]
-}
@@ -1,10 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "message2.kismet_device_base_macaddr", "target_field": "client.mac" } }
-  ]
-}
@@ -1,22 +0,0 @@
-{
-  "processors": [
-    { "rename": { "field": "message2.kismet_device_base_commonname", "target_field": "network.wireless.bssid" } },
-    { "foreach": { "field": "message2.dot11_device.dot11_device_associated_client_map", "processor": { "append": { "field": "network.wireless.associated_clients", "value": "{{_ingest._key}}" } }, "if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null" } }
-  ]
-}
@@ -1,389 +0,0 @@
-{
-  "description": "Pipeline for pfSense",
-  "processors": [
-    { "set": { "field": "ecs.version", "value": "8.10.0" } },
-    { "set": { "field": "observer.vendor", "value": "netgate" } },
-    { "set": { "field": "observer.type", "value": "firewall" } },
-    { "rename": { "field": "message", "target_field": "event.original" } },
-    { "set": { "field": "event.kind", "value": "event" } },
-    { "set": { "field": "event.timezone", "value": "{{_tmp.tz_offset}}", "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'" } },
-    { "grok": {
-        "description": "Parse syslog header",
-        "field": "event.original",
-        "patterns": [ "^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}" ],
-        "pattern_definitions": {
-          "ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
-          "TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
-          "BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
-          "BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
-          "NAME": "[[[:alnum:]]_-]+",
-          "SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
-          "TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
-          "OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
-          "PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
-          "BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
-          "META": "\\[[^\\]]*\\]"
-        }
-    } },
-    { "date": { "if": "ctx._tmp.timestamp8601 != null", "field": "_tmp.timestamp8601", "target_field": "@timestamp", "formats": [ "ISO8601" ] } },
-    { "date": { "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null", "field": "_tmp.timestamp", "target_field": "@timestamp", "formats": [ "MMM d HH:mm:ss", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ], "timezone": "{{ event.timezone }}" } },
-    { "grok": { "description": "Set Event Provider", "field": "process.name", "patterns": [ "^%{HYPHENATED_WORDS:event.provider}" ], "pattern_definitions": { "HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b" } } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-firewall", "if": "ctx.event.provider == 'filterlog'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-openvpn", "if": "ctx.event.provider == 'openvpn'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-ipsec", "if": "ctx.event.provider == 'charon'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-dhcp", "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-unbound", "if": "ctx.event.provider == 'unbound'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-haproxy", "if": "ctx.event.provider == 'haproxy'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-php-fpm", "if": "ctx.event.provider == 'php-fpm'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-squid", "if": "ctx.event.provider == 'squid'" } },
-    { "pipeline": { "name": "logs-pfsense.log-1.16.0-suricata", "if": "ctx.event.provider == 'suricata'" } },
-    { "drop": { "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)" } },
-    { "append": { "field": "event.category", "value": "network", "if": "ctx.network != null" } },
-    { "convert": { "field": "source.address", "target_field": "source.ip", "type": "ip", "ignore_failure": true, "ignore_missing": true } },
-    { "convert": { "field": "destination.address", "target_field": "destination.ip", "type": "ip", "ignore_failure": true, "ignore_missing": true } },
-    { "set": { "field": "network.type", "value": "ipv6", "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")" } },
-    { "set": { "field": "network.type", "value": "ipv4", "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")" } },
-    { "geoip": { "field": "source.ip", "target_field": "source.geo", "ignore_missing": true } },
-    { "geoip": { "field": "destination.ip", "target_field": "destination.geo", "ignore_missing": true } },
-    { "geoip": { "ignore_missing": true, "database_file": "GeoLite2-ASN.mmdb", "field": "source.ip", "target_field": "source.as", "properties": [ "asn", "organization_name" ] } },
-    { "geoip": { "database_file": "GeoLite2-ASN.mmdb", "field": "destination.ip", "target_field": "destination.as", "properties": [ "asn", "organization_name" ], "ignore_missing": true } },
-    { "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_missing": true } },
-    { "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_missing": true } },
-    { "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_missing": true } },
-    { "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_missing": true } },
-    { "community_id": { "target_field": "network.community_id", "ignore_failure": true } },
-    { "grok": { "field": "observer.ingress.interface.name", "patterns": [ "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}" ], "ignore_missing": true, "ignore_failure": true } },
-    { "set": { "field": "network.vlan.id", "copy_from": "observer.ingress.vlan.id", "ignore_empty_value": true } },
-    { "append": { "field": "related.ip", "value": "{{destination.ip}}", "allow_duplicates": false, "if": "ctx.destination?.ip != null" } },
-    { "append": { "field": "related.ip", "value": "{{source.ip}}", "allow_duplicates": false, "if": "ctx.source?.ip != null" } },
-    { "append": { "field": "related.ip", "value": "{{source.nat.ip}}", "allow_duplicates": false, "if": "ctx.source?.nat?.ip != null" } },
-    { "append": { "field": "related.hosts", "value": "{{destination.domain}}", "if": "ctx.destination?.domain != null" } },
-    { "append": { "field": "related.user", "value": "{{user.name}}", "if": "ctx.user?.name != null" } },
-    { "set": { "field": "network.direction", "value": "{{network.direction}}bound", "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/" } },
-    { "remove": { "field": [ "_tmp" ], "ignore_failure": true } },
-    { "script": { "lang": "painless", "description": "This script processor iterates over the whole document to remove fields with null values.", "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" } },
-    { "remove": { "field": "event.original", "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))", "ignore_failure": true, "ignore_missing": true } },
-    { "pipeline": { "name": "logs-pfsense.log@custom", "ignore_missing_pipeline": true } }
-  ],
-  "on_failure": [
-    { "remove": { "field": [ "_tmp" ], "ignore_failure": true } },
-    { "set": { "field": "event.kind", "value": "pipeline_error" } },
-    { "append": { "field": "error.message", "value": "{{{ _ingest.on_failure_message }}}" } }
-  ],
-  "_meta": { "managed_by": "fleet", "managed": true, "package": { "name": "pfsense" } }
-}
@@ -1,31 +0,0 @@
-{
-  "description": "Pipeline for parsing pfSense Suricata logs.",
-  "processors": [
-    { "pipeline": { "name": "suricata.common" } }
-  ],
-  "on_failure": [
-    { "set": { "field": "event.kind", "value": "pipeline_error" } },
-    { "append": { "field": "error.message", "value": "{{{ _ingest.on_failure_message }}}" } }
-  ],
-  "_meta": { "managed_by": "fleet", "managed": true, "package": { "name": "pfsense" } }
-}
@@ -56,7 +56,6 @@
 { "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }},
 { "set": { "if": "ctx.scan?.yara?.matches instanceof List", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }},
 { "set": { "if": "ctx.rule?.name != null", "field": "event.dataset", "value": "alert", "override": true }},
-{ "set": { "if": "ctx.rule?.name != null", "field": "rule.uuid", "value": "{{rule.name}}", "override": true }},
 { "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }},
 { "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } },
 { "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}},
@@ -68,7 +67,6 @@
 { "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } },
 { "set": { "field": "observer.name", "value": "{{agent.name}}" }},
 { "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
-{ "convert" : { "field" : "scan.pe.flags","type": "string", "ignore_missing":true }},
 { "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
 { "pipeline": { "name": "common" } }
 ]
@@ -1,7 +1,6 @@
 {
 "description" : "suricata.alert",
 "processors" : [
-  { "set": { "field": "_index", "value": "logs-suricata.alerts-so" } },
   { "set": { "field": "tags","value": "alert" }},
   { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
   { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
@@ -4,7 +4,6 @@
 { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
 { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
 { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
-{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
 { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
 { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
 { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
@@ -1,21 +0,0 @@
-{
-  "description" : "suricata.ike",
-  "processors" : [
-    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
-    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.alg_auth", "target_field": "ike.algorithm.authentication", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.alg_enc", "target_field": "ike.algorithm.encryption", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.alg_esn", "target_field": "ike.algorithm.esn", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.alg_dh", "target_field": "ike.algorithm.dh", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.alg_prf", "target_field": "ike.algorithm.prf", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.exchange_type", "target_field": "ike.exchange_type", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.payload", "target_field": "ike.payload", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.role", "target_field": "ike.role", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.init_spi", "target_field": "ike.spi.initiator", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.resp_spi", "target_field": "ike.spi.responder", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.version_major", "target_field": "ike.version.major", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.version_minor", "target_field": "ike.version.minor", "ignore_missing": true } },
-    { "rename": { "field": "message2.ike.ikev2.errors", "target_field": "ike.ikev2.errors", "ignore_missing": true } },
-    { "pipeline": { "name": "common" } }
-  ]
-}
8 salt/elasticsearch/files/ingest/suricata.ikev2 Normal file
@@ -0,0 +1,8 @@
+{
+  "description" : "suricata.ikev2",
+  "processors" : [
+    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
+    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
+    { "pipeline": { "name": "common" } }
+  ]
+}
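Once a file like the new suricata.ikev2 above is picked up by the grid's pipeline loader, its presence can be checked directly against Elasticsearch. This is a hedged sketch: it assumes the pipeline is registered under the same name as the file and that the shared curl.config also works for direct Elasticsearch queries on port 9200.

curl -sK /opt/so/conf/elasticsearch/curl.config "http://localhost:9200/_ingest/pipeline/suricata.ikev2?pretty"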
@@ -27,8 +27,7 @@
       "monitor",
       "read",
       "read_cross_cluster",
-      "view_index_metadata",
-      "write"
+      "view_index_metadata"
     ]
   }
 ],
@@ -13,8 +13,7 @@
       "monitor",
       "read",
       "read_cross_cluster",
-      "view_index_metadata",
-      "write"
+      "view_index_metadata"
     ]
   }
 ],
@@ -5,10 +5,6 @@ elasticsearch:
   esheap:
     description: Specify the memory heap size in (m)egabytes for Elasticsearch.
    helpLink: elasticsearch.html
-  index_clean:
-    description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
-    forcedType: bool
-    helpLink: elasticsearch.html
   retention:
     retention_pct:
       decription: Total percentage of space used by Elasticsearch for multi node clusters
@@ -49,28 +45,6 @@ elasticsearch:
       description: Max number of boolean clauses per query.
       global: True
       helpLink: elasticsearch.html
-  pipelines:
-    custom001: &pipelines
-      description:
-        description: Description of the ingest node pipeline
-        global: True
-        advanced: True
-        helpLink: elasticsearch.html
-      processors:
-        description: Processors for the ingest node pipeline
-        global: True
-        advanced: True
-        multiline: True
-        helpLink: elasticsearch.html
-    custom002: *pipelines
-    custom003: *pipelines
-    custom004: *pipelines
-    custom005: *pipelines
-    custom006: *pipelines
-    custom007: *pipelines
-    custom008: *pipelines
-    custom009: *pipelines
-    custom010: *pipelines
   index_settings:
     global_overrides:
       index_template:
@@ -99,9 +73,12 @@ elasticsearch:
           description: The order to sort by. Must set index_sorting to True.
           global: True
           helpLink: elasticsearch.html
-      policy:
         phases:
           hot:
+            max_age:
+              description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
+              global: True
+              helpLink: elasticsearch.html
             actions:
               set_priority:
                 priority:
@@ -120,9 +97,7 @@ elasticsearch:
               helpLink: elasticsearch.html
           cold:
             min_age:
-              description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
-              regex: ^[0-9]{1,5}d$
-              forcedType: string
+              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
               global: True
               helpLink: elasticsearch.html
           actions:
@@ -133,8 +108,8 @@ elasticsearch:
               helpLink: elasticsearch.html
           warm:
             min_age:
-              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier.
-              regex: ^[0-9]{1,5}d$
+              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
+              regex: ^\[0-9\]{1,5}d$
               forcedType: string
               global: True
           actions:
@@ -147,8 +122,6 @@ elasticsearch:
           delete:
             min_age:
               description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
-              regex: ^[0-9]{1,5}d$
-              forcedType: string
               global: True
               helpLink: elasticsearch.html
     so-logs: &indexSettings
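The min_age settings above feed the ILM policies applied to the so-* indices. A hedged way to see which lifecycle phase an index currently sits in is the _ilm/explain API; the index pattern below is illustrative, and it is an assumption that the shared curl.config is valid for direct Elasticsearch queries.

curl -sK /opt/so/conf/elasticsearch/curl.config "http://localhost:9200/so-logs-*/_ilm/explain?pretty" \
  | jq '.indices | to_entries | .[0]'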
@@ -275,9 +248,7 @@ elasticsearch:
               helpLink: elasticsearch.html
           warm:
             min_age:
-              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier.
-              regex: ^[0-9]{1,5}d$
-              forcedType: string
+              description: Minimum age of index. This determines when the index should be moved to the hot tier.
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -302,9 +273,7 @@ elasticsearch:
               helpLink: elasticsearch.html
           cold:
             min_age:
-              description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
-              regex: ^[0-9]{1,5}d$
-              forcedType: string
+              description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -319,8 +288,6 @@ elasticsearch:
           delete:
             min_age:
               description: Minimum age of index. This determines when the index should be deleted.
-              regex: ^[0-9]{1,5}d$
-              forcedType: string
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -351,7 +318,6 @@ elasticsearch:
     so-logs-windows_x_powershell: *indexSettings
     so-logs-windows_x_powershell_operational: *indexSettings
     so-logs-windows_x_sysmon_operational: *indexSettings
-    so-logs-winlog_x_winlog: *indexSettings
     so-logs-apache_x_access: *indexSettings
     so-logs-apache_x_error: *indexSettings
     so-logs-auditd_x_log: *indexSettings
@@ -376,17 +342,10 @@ elasticsearch:
     so-logs-azure_x_signinlogs: *indexSettings
     so-logs-azure_x_springcloudlogs: *indexSettings
     so-logs-barracuda_x_waf: *indexSettings
-    so-logs-cef_x_log: *indexSettings
     so-logs-cisco_asa_x_log: *indexSettings
     so-logs-cisco_ftd_x_log: *indexSettings
     so-logs-cisco_ios_x_log: *indexSettings
     so-logs-cisco_ise_x_log: *indexSettings
-    so-logs-citrix_adc_x_interface: *indexSettings
-    so-logs-citrix_adc_x_lbvserver: *indexSettings
-    so-logs-citrix_adc_x_service: *indexSettings
-    so-logs-citrix_adc_x_system: *indexSettings
-    so-logs-citrix_adc_x_vpn: *indexSettings
-    so-logs-citrix_waf_x_log: *indexSettings
     so-logs-cloudflare_x_audit: *indexSettings
     so-logs-cloudflare_x_logpull: *indexSettings
     so-logs-crowdstrike_x_falcon: *indexSettings
@@ -394,7 +353,6 @@ elasticsearch:
     so-logs-darktrace_x_ai_analyst_alert: *indexSettings
     so-logs-darktrace_x_model_breach_alert: *indexSettings
     so-logs-darktrace_x_system_status_alert: *indexSettings
-    so-logs-detections_x_alerts: *indexSettings
     so-logs-f5_bigip_x_log: *indexSettings
     so-logs-fim_x_event: *indexSettings
     so-logs-fortinet_x_clientendpoint: *indexSettings
@@ -448,8 +406,6 @@ elasticsearch:
     so-logs-mysql_x_error: *indexSettings
     so-logs-mysql_x_slowlog: *indexSettings
     so-logs-netflow_x_log: *indexSettings
-    so-logs-nginx_x_access: *indexSettings
-    so-logs-nginx_x_error: *indexSettings
     so-logs-o365_x_audit: *indexSettings
     so-logs-okta_x_system: *indexSettings
     so-logs-panw_x_panos: *indexSettings
@@ -515,16 +471,13 @@ elasticsearch:
     so-metrics-endpoint_x_metadata: *indexSettings
     so-metrics-endpoint_x_metrics: *indexSettings
     so-metrics-endpoint_x_policy: *indexSettings
-    so-metrics-nginx_x_stubstatus: *indexSettings
     so-case: *indexSettings
     so-common: *indexSettings
     so-endgame: *indexSettings
     so-idh: *indexSettings
     so-suricata: *indexSettings
-    so-suricata_x_alerts: *indexSettings
     so-import: *indexSettings
     so-kratos: *indexSettings
-    so-kismet: *indexSettings
     so-logstash: *indexSettings
     so-redis: *indexSettings
     so-strelka: *indexSettings
@@ -2,10 +2,12 @@
 {% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}

 {% set PILLAR_GLOBAL_OVERRIDES = {} %}
-{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings', {}) %}
+{% if salt['pillar.get']('elasticsearch:index_settings') is defined %}
+{% set ES_INDEX_PILLAR = salt['pillar.get']('elasticsearch:index_settings') %}
 {% if ES_INDEX_PILLAR.global_overrides is defined %}
 {% set PILLAR_GLOBAL_OVERRIDES = ES_INDEX_PILLAR.pop('global_overrides') %}
 {% endif %}
+{% endif %}

 {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}

@@ -17,12 +19,6 @@
|
|||||||
{% set ES_INDEX_SETTINGS = {} %}
|
{% set ES_INDEX_SETTINGS = {} %}
|
||||||
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
|
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
|
||||||
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
|
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
|
||||||
{# if policy isn't defined in the original index settings, then dont merge policy from the global_overrides #}
|
|
||||||
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM manged indices #}
|
|
||||||
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
|
|
||||||
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if settings.index_template is defined %}
|
{% if settings.index_template is defined %}
|
||||||
{% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %}
|
{% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %}
|
||||||
{% do settings.index_template.template.settings.index.pop('sort') %}
|
{% do settings.index_template.template.settings.index.pop('sort') %}
|
||||||
|
|||||||
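The first hunk above guards the pillar lookup: ES_INDEX_PILLAR is now only assigned when elasticsearch:index_settings exists in the pillar, with a matching {% endif %} closing the new block, and the second hunk removes the block that pruned policy overrides from indices whose original defaults carry no ILM policy. The global_overrides key that both the defaults and the pillar can carry is popped out and merged separately from the per-index entries. A minimal pillar sketch of a global override, assuming that layout (the replica count is a hypothetical value, not taken from this diff):

    elasticsearch:
      index_settings:
        global_overrides:
          index_template:
            template:
              settings:
                index:
                  number_of_replicas: 1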
@@ -1,36 +0,0 @@
-{
-  "_meta": {
-    "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html",
-    "ecs_version": "1.12.2"
-  },
-  "template": {
-    "mappings": {
-      "properties": {
-        "device": {
-          "properties": {
-            "id": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "manufacturer": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "model": {
-              "properties": {
-                "identifier": {
-                  "ignore_above": 1024,
-                  "type": "keyword"
-                },
-                "name": {
-                  "ignore_above": 1024,
-                  "type": "keyword"
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
@@ -1,32 +0,0 @@
-{
-  "_meta": {
-    "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html",
-    "ecs_version": "1.12.2"
-  },
-  "template": {
-    "mappings": {
-      "properties": {
-        "kismet": {
-          "properties": {
-            "alerts": {
-              "properties": {
-                "count": {
-                  "type": "long"
-                }
-              }
-            },
-            "first_seen": {
-              "type": "date"
-            },
-            "last_seen": {
-              "type": "date"
-            },
-            "seenby": {
-              "type": "nested"
-            }
-          }
-        }
-      }
-    }
-  }
-}
@@ -77,43 +77,6 @@
 "type": "keyword"
 }
 }
-},
-"wireless": {
-"properties": {
-"associated_clients": {
-"ignore_above": 1024,
-"type": "keyword"
-},
-"bssid": {
-"ignore_above": 1024,
-"type": "keyword"
-},
-"channel": {
-"ignore_above": 1024,
-"type": "keyword"
-},
-"channel_utilization": {
-"type": "float"
-},
-"frequency": {
-"type": "double"
-},
-"ssid": {
-"ignore_above": 1024,
-"type": "keyword"
-},
-"ssid_cloaked": {
-"type": "integer"
-},
-"known_connected_bssid": {
-"ignore_above": 1024,
-"type": "keyword"
-},
-"last_connected_bssid": {
-"ignore_above": 1024,
-"type": "keyword"
-}
-}
 }
 }
 }
@@ -1,5 +1,4 @@
-{
-  "template": {
+{"template": {
     "settings": {
       "index": {
         "lifecycle": {
@@ -380,4 +379,4 @@
     "managed_by": "fleet",
     "managed": true
   }
 }
@@ -1,12 +0,0 @@
-{
-  "template": {
-    "settings": {}
-  },
-  "_meta": {
-    "package": {
-      "name": "endpoint"
-    },
-    "managed_by": "fleet",
-    "managed": true
-  }
-}
@@ -1,132 +0,0 @@
-{
-  "template": {
-    "settings": {
-      "index": {
-        "lifecycle": {
-          "name": "logs-endpoint.collection-diagnostic"
-        },
-        "codec": "best_compression",
-        "default_pipeline": "logs-endpoint.diagnostic.collection-8.10.2",
-        "mapping": {
-          "total_fields": {
-            "limit": "10000"
-          },
-          "ignore_malformed": "true"
-        },
-        "query": {
-          "default_field": [
-            "ecs.version",
-            "event.action",
-            "event.category",
-            "event.code",
-            "event.dataset",
-            "event.hash",
-            "event.id",
-            "event.kind",
-            "event.module",
-            "event.outcome",
-            "event.provider",
-            "event.type"
-          ]
-        }
-      }
-    },
-    "mappings": {
-      "dynamic": false,
-      "properties": {
-        "@timestamp": {
-          "ignore_malformed": false,
-          "type": "date"
-        },
-        "ecs": {
-          "properties": {
-            "version": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            }
-          }
-        },
-        "data_stream": {
-          "properties": {
-            "namespace": {
-              "type": "constant_keyword"
-            },
-            "type": {
-              "type": "constant_keyword"
-            },
-            "dataset": {
-              "type": "constant_keyword"
-            }
-          }
-        },
-        "event": {
-          "properties": {
-            "severity": {
-              "type": "long"
-            },
-            "code": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "created": {
-              "type": "date"
-            },
-            "kind": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "module": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "type": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "sequence": {
-              "type": "long"
-            },
-            "ingested": {
-              "type": "date"
-            },
-            "provider": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "action": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "id": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "category": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "dataset": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "hash": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "outcome": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            }
-          }
-        }
-      }
-    }
-  },
-  "_meta": {
-    "package": {
-      "name": "endpoint"
-    },
-    "managed_by": "fleet",
-    "managed": true
-  }
-}
@@ -1,22 +0,0 @@
-{
-  "template": {
-    "mappings": {
-      "properties": {
-        "error": {
-          "properties": {
-            "message": {
-              "type": "match_only_text"
-            }
-          }
-        }
-      }
-    }
-  },
-  "_meta": {
-    "package": {
-      "name": "system"
-    },
-    "managed_by": "fleet",
-    "managed": true
-  }
-}
@@ -1,112 +0,0 @@
-{
-  "template": {
-    "mappings": {
-      "dynamic": "strict",
-      "properties": {
-        "binary": {
-          "type": "binary"
-        },
-        "boolean": {
-          "type": "boolean"
-        },
-        "byte": {
-          "type": "byte"
-        },
-        "created_at": {
-          "type": "date"
-        },
-        "created_by": {
-          "type": "keyword"
-        },
-        "date": {
-          "type": "date"
-        },
-        "date_nanos": {
-          "type": "date_nanos"
-        },
-        "date_range": {
-          "type": "date_range"
-        },
-        "deserializer": {
-          "type": "keyword"
-        },
-        "double": {
-          "type": "double"
-        },
-        "double_range": {
-          "type": "double_range"
-        },
-        "float": {
-          "type": "float"
-        },
-        "float_range": {
-          "type": "float_range"
-        },
-        "geo_point": {
-          "type": "geo_point"
-        },
-        "geo_shape": {
-          "type": "geo_shape"
-        },
-        "half_float": {
-          "type": "half_float"
-        },
-        "integer": {
-          "type": "integer"
-        },
-        "integer_range": {
-          "type": "integer_range"
-        },
-        "ip": {
-          "type": "ip"
-        },
-        "ip_range": {
-          "type": "ip_range"
-        },
-        "keyword": {
-          "type": "keyword"
-        },
-        "list_id": {
-          "type": "keyword"
-        },
-        "long": {
-          "type": "long"
-        },
-        "long_range": {
-          "type": "long_range"
-        },
-        "meta": {
-          "type": "object",
-          "enabled": false
-        },
-        "serializer": {
-          "type": "keyword"
-        },
-        "shape": {
-          "type": "shape"
-        },
-        "short": {
-          "type": "short"
-        },
-        "text": {
-          "type": "text"
-        },
-        "tie_breaker_id": {
-          "type": "keyword"
-        },
-        "updated_at": {
-          "type": "date"
-        },
-        "updated_by": {
-          "type": "keyword"
-        }
-      }
-    },
-    "aliases": {}
-  },
-  "version": 2,
-  "_meta": {
-    "managed": true,
-    "description": "default mappings for the .items index template installed by Kibana/Security"
-  }
-}
@@ -1,55 +0,0 @@
-{
-  "template": {
-    "mappings": {
-      "dynamic": "strict",
-      "properties": {
-        "created_at": {
-          "type": "date"
-        },
-        "created_by": {
-          "type": "keyword"
-        },
-        "description": {
-          "type": "keyword"
-        },
-        "deserializer": {
-          "type": "keyword"
-        },
-        "immutable": {
-          "type": "boolean"
-        },
-        "meta": {
-          "type": "object",
-          "enabled": false
-        },
-        "name": {
-          "type": "keyword"
-        },
-        "serializer": {
-          "type": "keyword"
-        },
-        "tie_breaker_id": {
-          "type": "keyword"
-        },
-        "type": {
-          "type": "keyword"
-        },
-        "updated_at": {
-          "type": "date"
-        },
-        "updated_by": {
-          "type": "keyword"
-        },
-        "version": {
-          "type": "keyword"
-        }
-      }
-    },
-    "aliases": {}
-  },
-  "version": 2,
-  "_meta": {
-    "managed": true,
-    "description": "default mappings for the .lists index template installed by Kibana/Security"
-  }
-}
@@ -1,154 +0,0 @@
-{
-  "template": {
-    "mappings": {
-      "properties": {
-        "so_audit_doc_id": {
-          "ignore_above": 1024,
-          "type": "keyword"
-        },
-        "@timestamp": {
-          "type": "date"
-        },
-        "so_kind": {
-          "ignore_above": 1024,
-          "type": "keyword"
-        },
-        "so_operation": {
-          "ignore_above": 1024,
-          "type": "keyword"
-        },
-        "so_detection": {
-          "properties": {
-            "publicId": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "title": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "severity": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "author": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "description": {
-              "type": "text"
-            },
-            "category": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "product": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "service": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "content": {
-              "type": "text"
-            },
-            "isEnabled": {
-              "type": "boolean"
-            },
-            "isReporting": {
-              "type": "boolean"
-            },
-            "isCommunity": {
-              "type": "boolean"
-            },
-            "tags": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "ruleset": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "engine": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "language": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "license": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "overrides": {
-              "properties": {
-                "type": {
-                  "ignore_above": 1024,
-                  "type": "keyword"
-                },
-                "isEnabled": {
-                  "type": "boolean"
-                },
-                "createdAt": {
-                  "type": "date"
-                },
-                "updatedAt": {
-                  "type": "date"
-                },
-                "regex": {
-                  "type": "text"
-                },
-                "value": {
-                  "type": "text"
-                },
-                "thresholdType": {
-                  "ignore_above": 1024,
-                  "type": "keyword"
-                },
-                "track": {
-                  "ignore_above": 1024,
-                  "type": "keyword"
-                },
-                "ip": {
-                  "type": "text"
-                },
-                "count": {
-                  "type": "long"
-                },
-                "seconds": {
-                  "type": "long"
-                },
-                "customFilter": {
-                  "type": "text"
-                }
-              }
-            }
-          }
-        },
-        "so_detectioncomment": {
-          "properties": {
-            "createTime": {
-              "type": "date"
-            },
-            "detectionId": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            },
-            "value": {
-              "type": "text"
-            },
-            "userId": {
-              "ignore_above": 1024,
-              "type": "keyword"
-            }
-          }
-        }
-      }
-    }
-  },
-  "_meta": {
-    "ecs_version": "1.12.2"
-  }
-}
@@ -1,7 +0,0 @@
-{
-  "template": {},
-  "version": 1,
-  "_meta": {
-    "description": "default settings for common Security Onion Detections indices"
-  }
-}
@@ -14,18 +14,15 @@
   },
   "pe": {
     "properties": {
-      "flags": {
-        "type": "text"
-      },
-      "image_version": {
-        "type": "float"
-      },
       "sections": {
         "properties": {
           "entropy": {
             "type": "float"
           }
         }
+      },
+      "image_version": {
+        "type": "float"
       }
     }
   },
Some files were not shown because too many files have changed in this diff.