Compare commits

1 commit: 50ab63162a "users" by m0duspwnens, 2024-01-17 12:51:15 -05:00

743 changed files with 546159 additions and 264379 deletions


@@ -536,10 +536,11 @@ secretGroup = 4
 [allowlist]
 description = "global allow lists"
-regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''', '''integration_key\s=\s"so-logs-"''']
+regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''']
 paths = [
     '''gitleaks.toml''',
     '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
     '''(go.mod|go.sum)$''',
     '''salt/nginx/files/enterprise-attack.json'''
 ]


@@ -1,198 +0,0 @@
body:
  - type: markdown
    attributes:
      value: |
        ⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️
        If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
  - type: dropdown
    attributes:
      label: Version
      description: Which version of Security Onion 2.4.x are you asking about?
      options:
        -
        - 2.4.10
        - 2.4.20
        - 2.4.30
        - 2.4.40
        - 2.4.50
        - 2.4.60
        - 2.4.70
        - 2.4.80
        - 2.4.90
        - 2.4.100
        - 2.4.110
        - 2.4.111
        - 2.4.120
        - 2.4.130
        - 2.4.140
        - 2.4.141
        - 2.4.150
        - 2.4.160
        - Other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Installation Method
      description: How did you install Security Onion?
      options:
        -
        - Security Onion ISO image
        - Cloud image (Amazon, Azure, Google)
        - Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc. (unsupported)
        - Network installation on Ubuntu (unsupported)
        - Network installation on Debian (unsupported)
        - Other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Description
      description: >
        Is this discussion about installation, configuration, upgrading, or other?
      options:
        -
        - installation
        - configuration
        - upgrading
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Installation Type
      description: >
        When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
      options:
        -
        - Import
        - Eval
        - Standalone
        - Distributed
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Location
      description: >
        Is this deployment in the cloud, on-prem with Internet access, or airgap?
      options:
        -
        - cloud
        - on-prem with Internet access
        - airgap
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Hardware Specs
      description: >
        Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html?
      options:
        -
        - Meets minimum requirements
        - Exceeds minimum requirements
        - Does not meet minimum requirements
        - other (please provide detail below)
    validations:
      required: true
  - type: input
    attributes:
      label: CPU
      description: How many CPU cores do you have?
    validations:
      required: true
  - type: input
    attributes:
      label: RAM
      description: How much RAM do you have?
    validations:
      required: true
  - type: input
    attributes:
      label: Storage for /
      description: How much storage do you have for the / partition?
    validations:
      required: true
  - type: input
    attributes:
      label: Storage for /nsm
      description: How much storage do you have for the /nsm partition?
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Network Traffic Collection
      description: >
        Are you collecting network traffic from a tap or span port?
      options:
        -
        - tap
        - span port
        - other (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Network Traffic Speeds
      description: >
        How much network traffic are you monitoring?
      options:
        -
        - Less than 1Gbps
        - 1Gbps to 10Gbps
        - more than 10Gbps
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Status
      description: >
        Does SOC Grid show all services on all nodes as running OK?
      options:
        -
        - Yes, all services on all nodes are running OK
        - No, one or more services are failed (please provide detail below)
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Salt Status
      description: >
        Do you get any failures when you run "sudo salt-call state.highstate"?
      options:
        -
        - Yes, there are salt failures (please provide detail below)
        - No, there are no failures
    validations:
      required: true
  - type: dropdown
    attributes:
      label: Logs
      description: >
        Are there any additional clues in /opt/so/log/?
      options:
        -
        - Yes, there are additional clues in /opt/so/log/ (please provide detail below)
        - No, there are no additional clues
    validations:
      required: true
  - type: textarea
    attributes:
      label: Detail
      description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
      placeholder: |-
        STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!
        If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Guidelines
      options:
        - label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
          required: true

.github/ISSUE_TEMPLATE (new file, vendored)

@@ -0,0 +1,12 @@
PLEASE STOP AND READ THIS INFORMATION!

If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum instead:
https://securityonion.net/discuss

If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum to start a conversation about it instead of creating an issue.

If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provide information about your system and how you installed Security Onion
- include relevant log files
- include reproduction steps


@@ -1,38 +0,0 @@
---
name: Bug report
about: This option is for experienced community members to report a confirmed, reproducible bug
title: ''
labels: ''
assignees: ''
---

PLEASE STOP AND READ THIS INFORMATION!

If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum at https://securityonion.net/discuss.

If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum at https://securityonion.net/discuss to start a conversation about it instead of creating an issue.

If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provide information about your system and how you installed Security Onion
- include relevant log files
- include reproduction steps

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Additional context**
Add any other context about the problem here.


@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Security Onion Discussions
url: https://securityonion.com/discussions
about: Please ask and answer questions here


@@ -1,33 +0,0 @@
name: 'Close Threads'
on:
schedule:
- cron: '50 1 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
close-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: -1
days-before-issue-close: 60
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
days-before-pr-stale: 45
days-before-pr-close: 60
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."


@@ -18,7 +18,7 @@ jobs:
       with:
         path-to-signatures: 'signatures_v1.json'
         path-to-document: 'https://securityonionsolutions.com/cla'
-        allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,defensivedepth,m0duspwnens
+        allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,weslambert,defensivedepth,m0duspwnens
         remote-organization-name: Security-Onion-Solutions
         remote-repository-name: licensing


@@ -1,26 +0,0 @@
name: 'Lock Threads'
on:
schedule:
- cron: '50 2 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
lock-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest
steps:
- uses: jertel/lock-threads@main
with:
include-discussion-currently-open: true
discussion-inactive-days: 90
issue-inactive-days: 30
pr-inactive-days: 30


@@ -1,6 +1,10 @@
 name: python-test
 on:
+  push:
+    paths:
+      - "salt/sensoroni/files/analyzers/**"
+      - "salt/manager/tools/sbin"
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
@@ -13,7 +17,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.13"]
+        python-version: ["3.10"]
         python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
     steps:
@@ -32,4 +36,4 @@ jobs:
         flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
       - name: Test with pytest
         run: |
-          PYTHONPATH=${{ matrix.python-code-path }} pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
+          pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini


@@ -1,17 +1,17 @@
-### 2.4.160-20250625 ISO image released on 2025/06/25
+### 2.4.30-20231228 ISO image released on 2024/01/02
 ### Download and Verify
-2.4.160-20250625 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
-MD5: 78CF5602EFFAB84174C56AD2826E6E4E
-SHA1: FC7EEC3EC95D97D3337501BAA7CA8CAE7C0E15EA
-SHA256: 0ED965E8BEC80EE16AE90A0F0F96A3046CEF2D92720A587278DDDE3B656C01C2
+2.4.30-20231228 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
+MD5: DBD47645CD6FA8358C51D8753046FB54
+SHA1: 2494091065434ACB028F71444A5D16E8F8A11EDF
+SHA256: 3345AE1DC58AC7F29D82E60D9A36CDF8DE19B7DFF999D8C4F89C7BD36AEE7F1D
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,29 +25,27 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
 ```
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
 ```
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.160-20250625.iso.sig securityonion-2.4.160-20250625.iso
+gpg --verify securityonion-2.4.30-20231228.iso.sig securityonion-2.4.30-20231228.iso
 ```
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Wed 25 Jun 2025 10:13:33 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 28 Dec 2023 10:08:31 AM EST using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
 ```
-If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
 Once you've verified the ISO image, you're ready to proceed to our Installation guide:
 https://docs.securityonion.net/en/2.4/installation.html
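In addition to the GPG signature check, the downloaded ISO can be checked against the published SHA256. A minimal sketch, assuming bash 4+ and GNU coreutils, using the 2.4.30-20231228 filename and checksum from this file:

```
# Hash the ISO and compare it (case-insensitively) against the published SHA256.
expected="3345AE1DC58AC7F29D82E60D9A36CDF8DE19B7DFF999D8C4F89C7BD36AEE7F1D"
actual=$(sha256sum securityonion-2.4.30-20231228.iso | awk '{print $1}')
[ "${actual,,}" = "${expected,,}" ] && echo "SHA256 OK" || echo "SHA256 MISMATCH"
```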

LICENSE (deleted)

@@ -1,53 +0,0 @@
Elastic License 2.0 (ELv2)
Acceptance
By using the software, you agree to all of the terms and conditions below.
Copyright License
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below.
Limitations
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.
You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.
You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor's trademarks is subject to applicable law.
Patents
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
Notices
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
No Other Rights
These terms do not imply any licenses other than those expressly granted in these terms.
Termination
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
No Liability
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
Definitions
The licensor is the entity offering these terms, and the software is the software the licensor makes available under these terms, including any portion of it.
you refers to the individual or entity agreeing to these terms.
your company is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. control means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
your licenses are all the licenses granted to you for the software under these terms.
use means anything you do with the software requiring one of your licenses.
trademark means trademarks, service marks, and similar rights.


@@ -8,22 +8,19 @@ Alerts
 ![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
 Dashboards
-![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png)
+![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png)
 Hunt
-![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png)
+![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png)
-Detections
-![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png)
 PCAP
-![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png)
+![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png)
 Grid
-![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png)
+![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png)
 Config
-![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png)
+![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png)
 ### Release Notes


@@ -5,11 +5,9 @@
 | Version | Supported          |
 | ------- | ------------------ |
 | 2.4.x   | :white_check_mark: |
-| 2.3.x   | :x:                |
+| 2.3.x   | :white_check_mark: |
 | 16.04.x | :x:                |
-Security Onion 2.3 has reached End Of Life and is no longer supported.
 Security Onion 16.04 has reached End Of Life and is no longer supported.
 ## Reporting a Vulnerability


@@ -1 +1 @@
-2.4.160
+2.4.40


@@ -19,4 +19,4 @@ role:
   receiver:
   standalone:
   searchnode:
   sensor:


@@ -41,8 +41,7 @@ file_roots:
   base:
     - /opt/so/saltstack/local/salt
     - /opt/so/saltstack/default/salt
-    - /nsm/elastic-fleet/artifacts
-    - /opt/so/rules/nids
 # The master_roots setting configures a master-only copy of the file_roots dictionary,
 # used by the state compiler.


@@ -1,34 +0,0 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='elasticsearch:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
elasticsearch:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}
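For illustration, if a single search node with minion ID `searchnode1_searchnode` (a hypothetical name) returned `10.0.0.21` from the mine, the template above would render a pillar shaped roughly like this:

```
elasticsearch:
  nodes:
    searchnode:
      searchnode1:
        ip: 10.0.0.21
```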


@@ -1,2 +0,0 @@
kafka:
nodes:


@@ -1,15 +1,16 @@
 {% set node_types = {} %}
+{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
 {% for minionid, ip in salt.saltutil.runner(
     'mine.get',
-    tgt='logstash:enabled:true',
+    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
     fun='network.ip_addrs',
-    tgt_type='pillar') | dictsort()
+    tgt_type='compound') | dictsort()
 %}
 # only add a node to the pillar if it returned an ip from the mine
 {% if ip | length > 0%}
-{% set hostname = minionid.split('_') | first %}
-{% set node_type = minionid.split('_') | last %}
+{% set hostname = cached_grains[minionid]['host'] %}
+{% set node_type = minionid.split('_')[1] %}
 {% if node_type not in node_types.keys() %}
 {% do node_types.update({node_type: {hostname: ip[0]}}) %}
 {% else %}
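The targeting change above swaps pillar matching for Salt compound matching on the `role` grain. A hedged way to preview which minions such an expression selects, assuming it is run on the manager:

```
# List IP addresses from minions whose role grain matches the compound target.
sudo salt -C 'G@role:so-manager or G@role:so-standalone or G@role:so-searchnode' network.ip_addrs
```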


@@ -24,7 +24,6 @@
 {% endif %}
 {% endfor %}
-{% if node_types %}
 node_data:
 {% for node_type, host_values in node_types.items() %}
 {% for hostname, details in host_values.items() %}
@@ -34,6 +33,3 @@ node_data:
     role: {{node_type}}
 {% endfor %}
 {% endfor %}
-{% else %}
-node_data: False
-{% endif %}


@@ -1,34 +0,0 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='redis:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
redis:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}


@@ -16,18 +16,17 @@ base:
     - sensoroni.adv_sensoroni
     - telegraf.soc_telegraf
     - telegraf.adv_telegraf
-    - versionlock.soc_versionlock
-    - versionlock.adv_versionlock
+    - users
   '* and not *_desktop':
     - firewall.soc_firewall
     - firewall.adv_firewall
     - nginx.soc_nginx
     - nginx.adv_nginx
-    - node_data.ips
   '*_manager or *_managersearch':
     - match: compound
+    - node_data.ips
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
@@ -45,18 +44,16 @@ base:
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
+    - soctopus.soc_soctopus
+    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - kratos.soc_kratos
     - kratos.adv_kratos
-    - hydra.soc_hydra
-    - hydra.adv_hydra
-    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
-    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -65,12 +62,10 @@
     - elastalert.adv_elastalert
     - backup.soc_backup
     - backup.adv_backup
+    - soctopus.soc_soctopus
+    - soctopus.adv_soctopus
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - kafka.nodes
-    - kafka.soc_kafka
-    - kafka.adv_kafka
-    - stig.soc_stig
   '*_sensor':
     - healthcheck.sensor
@@ -86,11 +81,8 @@
     - suricata.adv_suricata
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - stig.soc_stig
-    - soc.license
   '*_eval':
-    - node_data.ips
     - secrets
     - healthcheck.eval
     - elasticsearch.index_templates
@@ -101,7 +93,6 @@
     - kibana.secrets
 {% endif %}
     - kratos.soc_kratos
-    - kratos.adv_kratos
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -115,12 +106,14 @@
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
+    - soctopus.soc_soctopus
+    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
     - strelka.adv_strelka
-    - hydra.soc_hydra
-    - hydra.adv_hydra
+    - kratos.soc_kratos
+    - kratos.adv_kratos
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
@@ -139,7 +132,6 @@
     - minions.adv_{{ grains.id }}
   '*_standalone':
-    - node_data.ips
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
@@ -156,14 +148,10 @@
     - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
-    - hydra.soc_hydra
-    - hydra.adv_hydra
-    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
-    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -175,6 +163,8 @@
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
+    - soctopus.soc_soctopus
+    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -191,10 +181,6 @@
     - suricata.adv_suricata
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - stig.soc_stig
-    - kafka.nodes
-    - kafka.soc_kafka
-    - kafka.adv_kafka
   '*_heavynode':
     - elasticsearch.auth
@@ -228,22 +214,15 @@
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
-    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
-    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - stig.soc_stig
-    - soc.license
-    - kafka.nodes
-    - kafka.soc_kafka
-    - kafka.adv_kafka
   '*_receiver':
     - logstash.nodes
@@ -256,13 +235,8 @@
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - kafka.nodes
-    - kafka.soc_kafka
-    - kafka.adv_kafka
-    - soc.license
   '*_import':
-    - node_data.ips
     - secrets
     - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -272,7 +246,6 @@
     - kibana.secrets
 {% endif %}
     - kratos.soc_kratos
-    - kratos.adv_kratos
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -284,12 +257,14 @@
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
+    - soctopus.soc_soctopus
+    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - backup.soc_backup
     - backup.adv_backup
-    - hydra.soc_hydra
-    - hydra.adv_hydra
+    - kratos.soc_kratos
+    - kratos.adv_kratos
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
@@ -308,7 +283,6 @@
     - minions.adv_{{ grains.id }}
   '*_fleet':
-    - node_data.ips
     - backup.soc_backup
     - backup.adv_backup
     - logstash.nodes
@@ -322,5 +296,3 @@
   '*_desktop':
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - stig.soc_stig
-    - soc.license

pillar/users/init.sls (new file)

@@ -0,0 +1,2 @@
# users pillar goes in /opt/so/saltstack/local/pillar/users/init.sls
# the users directory may need to be created under /opt/so/saltstack/local/pillar


@@ -0,0 +1,18 @@
users:
  sclapton:
    # required fields
    status: present
    # node_access determines which node types the user can access.
    # this can either be by grains.role or by final part of the minion id after the _
    node_access:
      - standalone
      - searchnode
    # optional fields
    fullname: Stevie Claptoon
    uid: 1001
    gid: 1001
    homephone: does not have a phone
    groups:
      - mygroup1
      - mygroup2
      - wheel # give sudo access

pillar/users/pillar.usage (new file)

@@ -0,0 +1,20 @@
users:
  sclapton:
    # required fields
    status: <present | absent>
    # node_access determines which node types the user can access.
    # this can either be by grains.role or by final part of the minion id after the _
    node_access:
      - standalone
      - searchnode
    # optional fields
    fullname: <string>
    uid: <integer>
    gid: <integer>
    roomnumber: <string>
    workphone: <string>
    homephone: <string>
    groups:
      - <string>
      - <string>
      - wheel # give sudo access
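A quick sanity check that the pillar reaches the intended nodes, as a sketch (the target glob is an assumption and depends on the local minion IDs):

```
# Render the merged users pillar on standalone nodes; the output should
# mirror the structure documented above.
sudo salt '*_standalone' pillar.get users
```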

pyci.sh

@@ -15,16 +15,12 @@ TARGET_DIR=${1:-.}
 PATH=$PATH:/usr/local/bin
-if [ ! -d .venv ]; then
-  python -m venv .venv
-fi
-source .venv/bin/activate
-if ! pip install flake8 pytest pytest-cov pyyaml; then
-  echo "Unable to install dependencies."
+if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
+  echo "Missing dependencies. Consider running the following command:"
+  echo " python -m pip install flake8 pytest pytest-cov"
   exit 1
 fi
-pip install pytest pytest-cov
 flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
 python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
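For reference, the script lints a target directory with flake8 and then runs pytest with a 100% coverage gate. A usage sketch, assuming it is invoked from the repository root against one of the paths the CI workflow above tests:

```
# Exits non-zero if flake8, pytest, or the coverage threshold fails.
./pyci.sh salt/sensoroni/files/analyzers
```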


@@ -24,7 +24,6 @@
     'influxdb',
     'soc',
     'kratos',
-    'hydra',
     'elasticfleet',
     'elastic-fleet-package-registry',
     'firewall',
@@ -35,6 +34,7 @@
     'suricata',
     'utility',
     'schedule',
+    'soctopus',
     'tcpreplay',
     'docker_clean'
   ],
@@ -66,10 +66,8 @@
     'registry',
     'manager',
     'nginx',
-    'strelka.manager',
     'soc',
     'kratos',
-    'hydra',
     'influxdb',
     'telegraf',
     'firewall',
@@ -94,10 +92,8 @@
     'nginx',
     'telegraf',
     'influxdb',
-    'strelka.manager',
     'soc',
     'kratos',
-    'hydra',
     'elasticfleet',
     'elastic-fleet-package-registry',
     'firewall',
@@ -105,9 +101,8 @@
     'suricata.manager',
     'utility',
     'schedule',
-    'docker_clean',
-    'stig',
-    'kafka'
+    'soctopus',
+    'docker_clean'
   ],
   'so-managersearch': [
     'salt.master',
@@ -117,10 +112,8 @@
     'nginx',
     'telegraf',
     'influxdb',
-    'strelka.manager',
     'soc',
     'kratos',
-    'hydra',
     'elastic-fleet-package-registry',
     'elasticfleet',
     'firewall',
@@ -129,9 +122,8 @@
     'suricata.manager',
     'utility',
     'schedule',
-    'docker_clean',
-    'stig',
-    'kafka'
+    'soctopus',
+    'docker_clean'
   ],
   'so-searchnode': [
     'ssl',
@@ -139,10 +131,7 @@
     'telegraf',
     'firewall',
     'schedule',
-    'docker_clean',
-    'stig',
-    'kafka.ca',
-    'kafka.ssl'
+    'docker_clean'
   ],
   'so-standalone': [
     'salt.master',
@@ -155,7 +144,6 @@
     'influxdb',
     'soc',
     'kratos',
-    'hydra',
     'elastic-fleet-package-registry',
     'elasticfleet',
     'firewall',
@@ -166,10 +154,9 @@
     'healthcheck',
     'utility',
     'schedule',
+    'soctopus',
     'tcpreplay',
-    'docker_clean',
-    'stig',
-    'kafka'
+    'docker_clean'
   ],
   'so-sensor': [
     'ssl',
@@ -181,15 +168,13 @@
     'healthcheck',
     'schedule',
     'tcpreplay',
-    'docker_clean',
-    'stig'
+    'docker_clean'
   ],
   'so-fleet': [
     'ssl',
     'telegraf',
     'firewall',
     'logstash',
-    'nginx',
     'healthcheck',
     'schedule',
     'elasticfleet',
@@ -200,18 +185,19 @@
     'telegraf',
     'firewall',
     'schedule',
-    'docker_clean',
-    'kafka',
-    'stig'
+    'docker_clean'
   ],
   'so-desktop': [
     'ssl',
     'docker_clean',
-    'telegraf',
-    'stig'
+    'telegraf'
   ],
 }, grain='role') %}
+{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
+{% do allowed_states.append('mysql') %}
+{% endif %}
 {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
 {% do allowed_states.append('zeek') %}
 {%- endif %}
@@ -237,6 +223,10 @@
 {% do allowed_states.append('elastalert') %}
 {% endif %}
+{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
+{% do allowed_states.append('playbook') %}
+{% endif %}
 {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
 {% do allowed_states.append('logstash') %}
 {% endif %}


@@ -4,5 +4,4 @@ backup:
     - /etc/pki
     - /etc/salt
     - /nsm/kratos
-    - /nsm/hydra
   destination: "/nsm/backup"


@@ -1,10 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% if GLOBALS.pcap_engine == "TRANSITION" %}
-{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
-{% else %}
-{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
-{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
-{% import 'bpf/macros.jinja' as MACROS %}
-{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
-{% set PCAPBPF = BPFMERGED.pcap %}
-{% endif %}
+{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
+{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+{% import 'bpf/macros.jinja' as MACROS %}
+{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
+{% set PCAPBPF = BPFMERGED.pcap %}


@@ -1,6 +1,6 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to Stenographer.
+    description: List of BPF filters to apply to PCAP.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
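Per the annotation above, `bpf.pcap` is a multiline list of BPF filter strings. A hedged sketch of a local pillar override (the filter itself is illustrative):

```
bpf:
  pcap:
    - "not (host 10.0.0.5)"  # illustrative: exclude one noisy host from full packet capture
```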


@@ -1,3 +1,6 @@
+mine_functions:
+  x509.get_pem_entries: [/etc/pki/ca.crt]
+
 x509_signing_policies:
   filebeat:
     - minions: '*'
@@ -67,17 +70,3 @@ x509_signing_policies:
     - authorityKeyIdentifier: keyid,issuer:always
     - days_valid: 820
     - copypath: /etc/pki/issued_certs/
-  kafka:
-    - minions: '*'
-    - signing_private_key: /etc/pki/ca.key
-    - signing_cert: /etc/pki/ca.crt
-    - C: US
-    - ST: Utah
-    - L: Salt Lake City
-    - basicConstraints: "critical CA:false"
-    - keyUsage: "digitalSignature, keyEncipherment"
-    - subjectKeyIdentifier: hash
-    - authorityKeyIdentifier: keyid,issuer:always
-    - extendedKeyUsage: "serverAuth, clientAuth"
-    - days_valid: 820
-    - copypath: /etc/pki/issued_certs/
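For context, a signing policy such as `filebeat` above is consumed by minion-side x509 states. A hypothetical sketch (the certificate path, CN, and `ca_server` value are assumptions, and exact argument names vary across Salt x509 module versions):

```
# Hypothetical state requesting a certificate signed under the filebeat policy.
/etc/pki/filebeat.crt:
  x509.certificate_managed:
    - ca_server: manager
    - signing_policy: filebeat
    - private_key: /etc/pki/filebeat.key
    - CN: sensor1.example.local
    - days_remaining: 30
```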


@@ -4,6 +4,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 include:
+  - common.soup_scripts
   - common.packages
 {% if GLOBALS.role in GLOBALS.manager_roles %}
   - manager.elasticsearch # needed for elastic_curl_config state
@@ -14,11 +15,6 @@ net.core.wmem_default:
   sysctl.present:
     - value: 26214400
-# Users are not a fan of console messages
-kernel.printk:
-  sysctl.present:
-    - value: "3 4 1 3"
 # Remove variables.txt from /tmp - This is temp
 rmvariablesfile:
   file.absent:
@@ -128,11 +124,6 @@ common_sbin:
     - user: 939
     - group: 939
     - file_mode: 755
-    - show_changes: False
-{% if GLOBALS.role == 'so-heavynode' %}
-    - exclude_pat:
-      - so-pcap-import
-{% endif %}
 common_sbin_jinja:
   file.recurse:
@@ -142,33 +133,6 @@ common_sbin_jinja:
     - group: 939
     - file_mode: 755
     - template: jinja
-    - show_changes: False
-{% if GLOBALS.role == 'so-heavynode' %}
-    - exclude_pat:
-      - so-import-pcap
-{% endif %}
-{% if GLOBALS.role == 'so-heavynode' %}
-remove_so-pcap-import_heavynode:
-  file.absent:
-    - name: /usr/sbin/so-pcap-import
-remove_so-import-pcap_heavynode:
-  file.absent:
-    - name: /usr/sbin/so-import-pcap
-{% endif %}
-{% if not GLOBALS.is_manager%}
-# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
-# these two states remove the scripts from non manager nodes
-remove_soup:
-  file.absent:
-    - name: /usr/sbin/soup
-remove_so-firewall:
-  file.absent:
-    - name: /usr/sbin/so-firewall
-{% endif %}
 so-status_script:
   file.managed:
@@ -202,7 +166,6 @@ sostatus_log:
   file.managed:
     - name: /opt/so/log/sostatus/status.log
    - mode: 644
-    - replace: False
 # Install sostatus check cron. This is used to populate Grid.
 so-status_check_cron:


@@ -27,7 +27,6 @@ commonpkgs:
       - vim
       - tar
       - unzip
-      - bc
 {% if grains.oscodename != 'focal' %}
       - python3-rich
 {% endif %}
@@ -57,7 +56,6 @@ commonpkgs:
     - skip_suggestions: True
     - pkgs:
       - python3-dnf-plugin-versionlock
-      - bc
       - curl
       - device-mapper-persistent-data
       - fuse


@@ -1,142 +1,23 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
-
-{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
-{% if SOC_GLOBAL.global.airgap %}
-{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
-{% else %}
-{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
-{% endif %}
-{% set SOVERSION = salt['file.read']('/etc/soversion').strip() %}
-
-remove_common_soup:
-  file.absent:
-    - name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
-remove_common_so-firewall:
-  file.absent:
-    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
-
-# This section is used to put the scripts in place in the Salt file system
-# in case a state run tries to overwrite what we do in the next section.
-copy_so-common_common_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
-    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
-    - force: True
-    - preserve: True
-copy_so-image-common_common_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
-    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
-    - force: True
-    - preserve: True
-copy_soup_manager_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
-    - force: True
-    - preserve: True
-copy_so-firewall_manager_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
-    - force: True
-    - preserve: True
-copy_so-yaml_manager_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
-    - force: True
-    - preserve: True
-copy_so-repo-sync_manager_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
-    - preserve: True
-copy_bootstrap-salt_manager_tools_sbin:
-  file.copy:
-    - name: /opt/so/saltstack/default/salt/salt/scripts/bootstrap-salt.sh
-    - source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
-    - preserve: True
-
-# This section is used to put the new script in place so that it can be called during soup.
-# It is faster than calling the states that normally manage them to put them in place.
-copy_so-common_sbin:
-  file.copy:
-    - name: /usr/sbin/so-common
-    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
-    - force: True
-    - preserve: True
-copy_so-image-common_sbin:
-  file.copy:
-    - name: /usr/sbin/so-image-common
-    - source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
-    - force: True
-    - preserve: True
-copy_soup_sbin:
-  file.copy:
-    - name: /usr/sbin/soup
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
-    - force: True
-    - preserve: True
-copy_so-firewall_sbin:
-  file.copy:
-    - name: /usr/sbin/so-firewall
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
-    - force: True
-    - preserve: True
-copy_so-yaml_sbin:
-  file.copy:
-    - name: /usr/sbin/so-yaml.py
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
-    - force: True
-    - preserve: True
-copy_so-repo-sync_sbin:
-  file.copy:
-    - name: /usr/sbin/so-repo-sync
-    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
-    - force: True
-    - preserve: True
-copy_bootstrap-salt_sbin:
-  file.copy:
-    - name: /usr/sbin/bootstrap-salt.sh
-    - source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
-    - force: True
-    - preserve: True
-
-{# this is added in 2.4.120 to remove salt repo files pointing to saltproject.io to accomodate the move to broadcom and new bootstrap-salt script #}
-{% if salt['pkg.version_cmp'](SOVERSION, '2.4.120') == -1 %}
-{% set saltrepofile = '/etc/yum.repos.d/salt.repo' %}
-{% if grains.os_family == 'Debian' %}
-{% set saltrepofile = '/etc/apt/sources.list.d/salt.list' %}
-{% endif %}
-remove_saltproject_io_repo_manager:
-  file.absent:
-    - name: {{ saltrepofile }}
-{% endif %}
-
-{% else %}
-
-fix_23_soup_sbin:
-  cmd.run:
-    - name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
-fix_23_soup_salt:
-  cmd.run:
-    - name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
-{% endif %}
+# Sync some Utilities
+soup_scripts:
+  file.recurse:
+    - name: /usr/sbin
+    - user: root
+    - group: root
+    - file_mode: 755
+    - source: salt://common/tools/sbin
+    - include_pat:
+      - so-common
+      - so-image-common
+
+soup_manager_scripts:
+  file.recurse:
+    - name: /usr/sbin
+    - user: root
+    - group: root
+    - file_mode: 755
+    - source: salt://manager/tools/sbin
+    - include_pat:
+      - so-firewall
+      - so-repo-sync
+      - soup


@@ -5,13 +5,8 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 . /usr/sbin/so-common
-cat << EOF
-so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
-https://docs.securityonion.net/en/2.4/salt.html
-EOF
-salt-call state.highstate -l info queue=True
+salt-call state.highstate -l info


@@ -8,6 +8,12 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a # Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files, # script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file. # and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
DEFAULT_SALT_DIR=/opt/so/saltstack/default DEFAULT_SALT_DIR=/opt/so/saltstack/default
DOC_BASE_URL="https://docs.securityonion.net/en/2.4" DOC_BASE_URL="https://docs.securityonion.net/en/2.4"
@@ -25,11 +31,6 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
export PATH="$PATH:/usr/sbin" export PATH="$PATH:/usr/sbin"
fi fi
# See if a proxy is set. If so use it.
if [ -f /etc/profile.d/so-proxy.sh ]; then
. /etc/profile.d/so-proxy.sh
fi
# Define a banner to separate sections # Define a banner to separate sections
banner="=========================================================================" banner="========================================================================="
@@ -99,17 +100,6 @@ add_interface_bond0() {
fi fi
} }
airgap_playbooks() {
SRC_DIR=$1
# Copy playbooks if using airgap
mkdir -p /nsm/airgap-resources
# Purge old airgap playbooks to ensure SO only uses the latest released playbooks
rm -fr /nsm/airgap-resources/playbooks
tar xf $SRC_DIR/airgap-resources/playbooks.tgz -C /nsm/airgap-resources/
chown -R socore:socore /nsm/airgap-resources/playbooks
git config --global --add safe.directory /nsm/airgap-resources/playbooks
}
check_container() { check_container() {
docker ps | grep "$1:" > /dev/null 2>&1 docker ps | grep "$1:" > /dev/null 2>&1
return $? return $?
@@ -179,46 +169,6 @@ check_salt_minion_status() {
return $status return $status
} }
# Compare es versions and return the highest version
compare_es_versions() {
# Save the original IFS
local OLD_IFS="$IFS"
IFS=.
local i ver1=($1) ver2=($2)
# Restore the original IFS
IFS="$OLD_IFS"
# Determine the maximum length between the two version arrays
local max_len=${#ver1[@]}
if [[ ${#ver2[@]} -gt $max_len ]]; then
max_len=${#ver2[@]}
fi
# Compare each segment of the versions
for ((i=0; i<max_len; i++)); do
# If a segment in ver1 or ver2 is missing, set it to 0
if [[ -z ${ver1[i]} ]]; then
ver1[i]=0
fi
if [[ -z ${ver2[i]} ]]; then
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo "$1"
return 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo "$2"
return 0
fi
done
echo "$1" # If versions are equal, return either
return 0
}
copy_new_files() { copy_new_files() {
# Copy new files over to the salt dir # Copy new files over to the salt dir
cd $UPDATE_DIR cd $UPDATE_DIR
@@ -229,21 +179,6 @@ copy_new_files() {
cd /tmp cd /tmp
} }
create_local_directories() {
echo "Creating local pillar and salt directories if needed"
PILLARSALTDIR=$1
local_salt_dir="/opt/so/saltstack/local"
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -p $local_salt_dir$suffixdir
fi
done
chown -R socore:socore $local_salt_dir/$i
done
}
disable_fastestmirror() { disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
} }
@@ -308,31 +243,6 @@ fail() {
exit 1 exit 1
} }
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}' | sed 's/,//')
[[ -z "$AGENTCOUNT" ]] && AGENTCOUNT="0"
else
AGENTCOUNT=0
fi
}
get_elastic_agent_vars() {
local path="${1:-/opt/so/saltstack/default}"
local defaultsfile="${path}/salt/elasticsearch/defaults.yaml"
if [ -f "$defaultsfile" ]; then
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
else
fail "Could not find salt/elasticsearch/defaults.yaml"
fi
}
get_random_value() { get_random_value() {
length=${1:-20} length=${1:-20}
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1 head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
@@ -419,7 +329,7 @@ lookup_salt_value() {
local="" local=""
fi fi
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local} salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
} }
lookup_pillar() { lookup_pillar() {
@@ -456,13 +366,6 @@ is_feature_enabled() {
return 1 return 1
} }
read_feat() {
if [ -f /opt/so/log/sostatus/lks_enabled ]; then
lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}')
echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)"
fi
}
require_manager() { require_manager() {
if is_manager_node; then if is_manager_node; then
echo "This is a manager, so we can proceed." echo "This is a manager, so we can proceed."
@@ -656,15 +559,6 @@ status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n" printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
} }
sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() { systemctl_func() {
local action=$1 local action=$1
local echo_action=$1 local echo_action=$1
@@ -689,8 +583,6 @@ has_uppercase() {
} }
update_elastic_agent() { update_elastic_agent() {
local path="${1:-/opt/so/saltstack/default}"
get_elastic_agent_vars "$path"
echo "Checking if Elastic Agent update is necessary..." echo "Checking if Elastic Agent update is necessary..."
download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR" download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR"
} }
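download_and_verify is defined elsewhere in so-common; its assumed contract, per the arguments above, is: fetch the tarball and its published MD5, compare digests, then unpack into the expansion directory. A self-contained Python sketch of that pattern:

import hashlib
import tarfile
import urllib.request

def download_and_verify(url, md5_url, dest, md5_dest, expansion_dir):
    # Sketch only: the real helper also handles retries, logging, and cleanup.
    urllib.request.urlretrieve(url, dest)
    urllib.request.urlretrieve(md5_url, md5_dest)
    with open(dest, "rb") as f:
        actual = hashlib.md5(f.read()).hexdigest()
    with open(md5_dest) as f:
        expected = f.read().split()[0]
    if actual != expected:
        raise ValueError(f"MD5 mismatch for {dest}: {actual} != {expected}")
    with tarfile.open(dest) as tar:
        tar.extractall(expansion_dir)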
View File
@@ -8,7 +8,6 @@
import sys import sys
import subprocess import subprocess
import os import os
import json
sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/') sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/')
import salt.config import salt.config
@@ -37,67 +36,17 @@ def check_needs_restarted():
with open(outfile, 'w') as f: with open(outfile, 'w') as f:
f.write(val) f.write(val)
def check_for_fps():
feat = 'fps'
feat_full = feat.replace('ps', 'ips')
fps = 0
try:
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0:
fps = 1
except:
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
try:
with open(fn, 'r') as f:
contents = f.read()
if '1' in contents:
fps = 1
except:
# Unknown, so assume 0
fps = 0
with open('/opt/so/log/sostatus/fps_enabled', 'w') as f:
f.write(str(fps))
def check_for_lks():
feat = 'Lks'
feat_full = feat.replace('ks', 'uks')
lks = 0
result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE)
data = json.loads(result.stdout)
for device in data['blockdevices']:
if 'children' in device:
for gc in device['children']:
if 'children' in gc:
try:
arg = 'is' + feat_full
result = subprocess.run(['cryptsetup', arg, gc['name']], stdout=subprocess.PIPE)
if result.returncode == 0:
lks = 1
except FileNotFoundError:
for ggc in gc['children']:
if 'crypt' in ggc['type']:
lks = 1
if lks:
break
with open('/opt/so/log/sostatus/lks_enabled', 'w') as f:
f.write(str(lks))
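check_for_lks walks two fixed levels of lsblk's JSON device tree and falls back to the reported device type when cryptsetup is absent. A condensed sketch of just that fallback traversal, written recursively for clarity (it does not attempt the cryptsetup isLuks probe):

import json
import subprocess

def any_crypt_device():
    # `lsblk -p -J` emits a JSON tree of block devices; look for any node of type "crypt".
    out = subprocess.run(["lsblk", "-p", "-J"], check=True, stdout=subprocess.PIPE).stdout
    def walk(nodes):
        for node in nodes:
            if node.get("type") == "crypt" or walk(node.get("children", [])):
                return True
        return False
    return walk(json.loads(out)["blockdevices"])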
def fail(msg): def fail(msg):
print(msg, file=sys.stderr) print(msg, file=sys.stderr)
sys.exit(1) sys.exit(1)
def main(): def main():
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8") proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0": if proc.stdout.strip() != "0":
fail("This program must be run as root") fail("This program must be run as root")
    # Ensure that umask is 0022 so that files created by this script have rw-r--r-- permissions
org_umask = os.umask(0o022)
check_needs_restarted() check_needs_restarted()
check_for_fps()
check_for_lks()
    # Restore umask to whatever value was set before this script was run. STIG hardening sets it to 0077 (rw-------).
os.umask(org_umask)
if __name__ == "__main__": if __name__ == "__main__":
main() main()
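The umask save/restore bracketing main's checks is a reusable pattern; one way to express the same idea as a context manager (a hypothetical helper, not part of this script):

import os
from contextlib import contextmanager

@contextmanager
def temporary_umask(mask=0o022):
    # os.umask returns the previous mask, so it can be restored on exit.
    previous = os.umask(mask)
    try:
        yield
    finally:
        os.umask(previous)

# Files created inside the block get at most rw-r--r-- permissions:
# with temporary_umask(0o022):
#     open('/opt/so/log/sostatus/example.log', 'w').close()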
View File
@@ -4,16 +4,22 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
import sys, argparse, re, subprocess, json
import sys, argparse, re, docker
from packaging.version import Version, InvalidVersion from packaging.version import Version, InvalidVersion
from itertools import groupby, chain from itertools import groupby, chain
def get_image_name(string) -> str: def get_image_name(string) -> str:
return ':'.join(string.split(':')[:-1]) return ':'.join(string.split(':')[:-1])
def get_so_image_basename(string) -> str: def get_so_image_basename(string) -> str:
return get_image_name(string).split('/so-')[-1] return get_image_name(string).split('/so-')[-1]
def get_image_version(string) -> str: def get_image_version(string) -> str:
ver = string.split(':')[-1] ver = string.split(':')[-1]
if ver == 'latest': if ver == 'latest':
@@ -29,75 +35,56 @@ def get_image_version(string) -> str:
return '999999.9.9' return '999999.9.9'
return ver return ver
def run_command(command):
process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if process.returncode != 0:
print(f"Error executing command: {command}", file=sys.stderr)
print(f"Error message: {process.stderr}", file=sys.stderr)
exit(1)
return process.stdout
def main(quiet): def main(quiet):
client = docker.from_env()
# Prune old/stopped containers
if not quiet: print('Pruning old containers')
client.containers.prune()
image_list = client.images.list(filters={ 'dangling': False })
# Map list of image objects to flattened list of tags (format: "name:version")
tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
# Filter to only SO images (base name begins with "so-")
tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
# Group tags into lists by base name (sort by same projection first)
tag_list.sort(key=lambda x: get_so_image_basename(x))
grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
no_prunable = True
for t_list in grouped_tag_lists:
try: try:
# Prune old/stopped containers using docker CLI # Group tags by version, in case multiple images exist with the same version string
if not quiet: print('Pruning old containers') t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
run_command('docker container prune -f') grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
# Get list of images using docker CLI # Keep the 2 most current version groups
images_json = run_command('docker images --format "{{json .}}"') if len(grouped_t_list) <= 2:
continue
# Parse the JSON output else:
image_list = [] no_prunable = False
for line in images_json.strip().split('\n'): for group in grouped_t_list[2:]:
if line: # Skip empty lines for tag in group:
image_list.append(json.loads(line)) if not quiet: print(f'Removing image {tag}')
# Extract tags in the format "name:version"
tag_list = []
for img in image_list:
# Skip dangling images
if img.get('Repository') != "<none>" and img.get('Tag') != "<none>":
tag = f"{img.get('Repository')}:{img.get('Tag')}"
# Filter to only SO images (base name begins with "so-")
if re.match(r'^.*\/so-[^\/]*$', get_image_name(tag)):
tag_list.append(tag)
# Group tags into lists by base name (sort by same projection first)
tag_list.sort(key=lambda x: get_so_image_basename(x))
grouped_tag_lists = [list(it) for k, it in groupby(tag_list, lambda x: get_so_image_basename(x))]
no_prunable = True
for t_list in grouped_tag_lists:
try: try:
# Group tags by version, in case multiple images exist with the same version string client.images.remove(tag, force=True)
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True) except docker.errors.ClientError as e:
grouped_t_list = [list(it) for k, it in groupby(t_list, lambda x: get_image_version(x))] print(f'Could not remove image {tag}, continuing...')
# Keep the 2 most current version groups except (docker.errors.APIError, InvalidVersion) as e:
if len(grouped_t_list) <= 2: print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
continue exit(1)
else:
no_prunable = False
for group in grouped_t_list[2:]:
for tag in group:
if not quiet: print(f'Removing image {tag}')
try:
run_command(f'docker rmi -f {tag}')
except Exception as e:
print(f'Could not remove image {tag}, continuing...')
except (InvalidVersion) as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
except Exception as e:
print('Unhandled exception occurred:')
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
if no_prunable and not quiet:
print('No Security Onion images to prune')
except Exception as e: except Exception as e:
print(f"Error: {e}", file=sys.stderr) print('Unhandled exception occurred:')
exit(1) print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
if no_prunable and not quiet:
print('No Security Onion images to prune')
if __name__ == "__main__": if __name__ == "__main__":
main_parser = argparse.ArgumentParser(add_help=False) main_parser = argparse.ArgumentParser(add_help=False)
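Both sides of this change implement the same retention policy: group each so-* image's tags by version, keep the two newest version groups, and remove the rest. A self-contained sketch of that selection logic on plain tag strings (the sample tags are made up):

from itertools import groupby
from packaging.version import Version

def prunable_tags(tags):
    # Sort newest-first, group tags sharing a version string, keep the 2 newest groups.
    tags = sorted(tags, key=lambda t: Version(t.split(":")[-1]), reverse=True)
    groups = [list(g) for _, g in groupby(tags, key=lambda t: t.split(":")[-1])]
    return [tag for group in groups[2:] for tag in group]

print(prunable_tags([
    "registry/so-soc:2.4.60", "registry/so-soc:2.4.50", "registry/so-soc:2.4.40",
]))  # -> ['registry/so-soc:2.4.40']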
View File
@@ -29,7 +29,6 @@ container_list() {
"so-influxdb" "so-influxdb"
"so-kibana" "so-kibana"
"so-kratos" "so-kratos"
"so-hydra"
"so-nginx" "so-nginx"
"so-pcaptools" "so-pcaptools"
"so-soc" "so-soc"
@@ -51,15 +50,16 @@ container_list() {
"so-idh" "so-idh"
"so-idstools" "so-idstools"
"so-influxdb" "so-influxdb"
"so-kafka"
"so-kibana" "so-kibana"
"so-kratos" "so-kratos"
"so-hydra"
"so-logstash" "so-logstash"
"so-mysql"
"so-nginx" "so-nginx"
"so-pcaptools" "so-pcaptools"
"so-playbook"
"so-redis" "so-redis"
"so-soc" "so-soc"
"so-soctopus"
"so-steno" "so-steno"
"so-strelka-backend" "so-strelka-backend"
"so-strelka-filestream" "so-strelka-filestream"
@@ -67,7 +67,7 @@ container_list() {
"so-strelka-manager" "so-strelka-manager"
"so-suricata" "so-suricata"
"so-telegraf" "so-telegraf"
"so-zeek" "so-zeek"
) )
else else
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
@@ -114,10 +114,6 @@ update_docker_containers() {
container_list container_list
fi fi
# all the images using ELASTICSEARCHDEFAULTS.elasticsearch.version
# does not include so-elastic-fleet since that container uses so-elastic-agent image
local IMAGES_USING_ES_VERSION=("so-elasticsearch")
rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1 rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1 mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
@@ -145,36 +141,15 @@ update_docker_containers() {
$PROGRESS_CALLBACK $i $PROGRESS_CALLBACK $i
fi fi
if [[ " ${IMAGES_USING_ES_VERSION[*]} " =~ [[:space:]]${i}[[:space:]] ]]; then
# this is an ES container, so use the version defined in elasticsearch defaults.yaml
local UPDATE_DIR='/tmp/sogh/securityonion'
if [ ! -d "$UPDATE_DIR" ]; then
UPDATE_DIR=/securityonion
fi
local v1=0
local v2=0
if [[ -f "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v1=$(egrep " +version: " "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
if [[ -f "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v2=$(egrep " +version: " "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
local highest_es_version=$(compare_es_versions "$v1" "$v2")
local image=$i:$highest_es_version$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/es-$highest_es_version/$image.sig
else
# this is not an ES container, so use the SO version for the image tag
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/$VERSION/$image.sig
fi
# Pull down the trusted docker image # Pull down the trusted docker image
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
run_check_net_err \ run_check_net_err \
"docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \ "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1 "Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
# Get signature # Get signature
run_check_net_err \ run_check_net_err \
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' $sig_url --output $SIGNPATH/$image.sig" \ "curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" \
"Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \ "Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \
noretry >> "$LOG_FILE" 2>&1 noretry >> "$LOG_FILE" 2>&1
# Dump our hash values # Dump our hash values
View File
@@ -49,6 +49,10 @@ if [ "$CONTINUE" == "y" ]; then
sed -i "s|$OLD_IP|$NEW_IP|g" $file sed -i "s|$OLD_IP|$NEW_IP|g" $file
done done
echo "Granting MySQL root user permissions on $NEW_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
echo "Removing MySQL root user from $OLD_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
echo "Updating Kibana dashboards" echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True
View File
@@ -95,8 +95,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -124,11 +122,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|No shard available" # Typical error when making a query before ES has finished loading all indices
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -153,11 +146,6 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|request_unauthorized" # false positive (login failures to Hydra result in an 'error' log)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -166,11 +154,15 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
@@ -181,7 +173,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -210,16 +201,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
fi fi
RESULT=0 RESULT=0
@@ -228,9 +210,7 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q) CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging exclude_container so-idstools # ignore due to known issues and noisy logging
exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers exclude_container so-playbook # ignore due to several playbook known issues
exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
for container_id in $CONTAINER_IDS; do for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names") container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -248,18 +228,10 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues exclude_log "update.log" # ignore playbook updates due to several known issues
exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check.
# Include Zeek reporter.log to detect errors after running known good pcap(s) through sensor
echo "/nsm/zeek/spool/logger/reporter.log" >> /tmp/log_check_files
for log_file in $(cat /tmp/log_check_files); do for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file" status "Checking log file $log_file"
View File
@@ -1,98 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0."
set -e
# This script is intended for cases where the ISO install did not properly set up TPM decryption for LUKS partitions at boot.
if [ -z $NOROOT ]; then
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
fi
ENROLL_TPM=N
while [[ $# -gt 0 ]]; do
case $1 in
--enroll-tpm)
ENROLL_TPM=Y
;;
*)
echo "Usage: $0 [options]"
echo ""
echo "where options are:"
echo " --enroll-tpm for when TPM enrollment was not selected during ISO install."
echo ""
exit 1
;;
esac
shift
done
check_for_tpm() {
echo -n "Checking for TPM: "
if [ -d /sys/class/tpm/tpm0 ]; then
echo -e "tpm0 found."
TPM="yes"
# Check if TPM is using sha1 or sha256
if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then
echo -e "TPM is using sha1.\n"
TPM_PCR="sha1"
elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then
echo -e "TPM is using sha256.\n"
TPM_PCR="sha256"
fi
else
echo -e "No TPM found.\n"
exit 1
fi
}
check_for_luks_partitions() {
echo "Checking for LUKS partitions"
for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do
echo "Found LUKS partition: $part"
LUKS_PARTITIONS+=("$part")
done
if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then
echo -e "No LUKS partitions found.\n"
exit 1
fi
echo ""
}
enroll_tpm_in_luks() {
read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE
echo ""
for part in "${LUKS_PARTITIONS[@]}"; do
echo "Enrolling TPM for LUKS device: /dev/$part"
if [ "$TPM_PCR" == "sha1" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
elif [ "$TPM_PCR" == "sha256" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
fi
done
}
regenerate_tpm_enrollment_token() {
for part in "${LUKS_PARTITIONS[@]}"; do
clevis luks regen -d /dev/$part -s 1 -q
done
}
check_for_tpm
check_for_luks_partitions
if [[ $ENROLL_TPM == "Y" ]]; then
enroll_tpm_in_luks
else
regenerate_tpm_enrollment_token
fi
echo "Running dracut"
dracut -fv
echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n"
View File
@@ -10,7 +10,7 @@
. /usr/sbin/so-common . /usr/sbin/so-common
. /usr/sbin/so-image-common . /usr/sbin/so-image-common
REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"} REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
REPLAYSPEED=${REPLAYSPEED:-10} REPLAYSPEED=${REPLAYSPEED:-10}
mkdir -p /opt/so/samples mkdir -p /opt/so/samples
View File
@@ -63,7 +63,7 @@ function status {
function pcapinfo() { function pcapinfo() {
PCAP=$1 PCAP=$1
ARGS=$2 ARGS=$2
docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -ae $ARGS docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
} }
function pcapfix() { function pcapfix() {
@@ -89,7 +89,6 @@ function suricata() {
-v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \ -v ${NSM_PATH}/:/nsm/:rw \
-v "$PCAP:/input.pcap:ro" \ -v "$PCAP:/input.pcap:ro" \
-v /dev/null:/nsm/suripcap:rw \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
@@ -248,7 +247,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g') START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
status "Import complete!" status "Import complete!"
status status
View File
@@ -9,9 +9,6 @@
. /usr/sbin/so-common . /usr/sbin/so-common
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
{%- if salt['grains.get']('sosmodel', '') %} {%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %} {%- set model = salt['grains.get']('sosmodel') %}
model={{ model }} model={{ model }}
@@ -19,42 +16,33 @@ model={{ model }}
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
exit 0 exit 0
fi fi
for i in "${software_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=true
is_hwraid=false
break
fi
done
for i in "${hardware_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=false
is_hwraid=true
break
fi
done
{%- else %} {%- else %}
echo "This is not an appliance" echo "This is not an appliance"
exit 0 exit 0
{%- endif %} {%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
is_hwraid=true
fi
check_nsm_raid() { check_nsm_raid() {
PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl) PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
MEGACTL=$(/opt/raidtools/megasasctl |grep optimal) MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
#This doesn't have raid if [[ $APPLIANCE == '1' ]]; then
HWRAID=0
else
if [[ -n $PERCCLI ]]; then if [[ -n $PERCCLI ]]; then
HWRAID=0 HWRAID=0
elif [[ -n $MEGACTL ]]; then elif [[ -n $MEGACTL ]]; then
HWRAID=0 HWRAID=0
else else
HWRAID=1 HWRAID=1
fi fi
fi fi
} }
@@ -62,27 +50,17 @@ check_nsm_raid() {
check_boss_raid() { check_boss_raid() {
MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional) MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter") MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)
# Is this NVMe BOSS RAID? # Check to see if this is an SM-based system
if [[ "$model" =~ "-DE02" ]]; then if [[ -z $MVTEST ]]; then
if [[ -n $BOSSNVMECLI ]]; then if [[ -n $MVCLI ]]; then
BOSSRAID=0 BOSSRAID=0
else else
BOSSRAID=1 BOSSRAID=1
fi fi
else else
# Check to see if this is an SM-based system # This doesn't have BOSS RAID, so set it to 0
if [[ -z $MVTEST ]]; then BOSSRAID=0
if [[ -n $MVCLI ]]; then
BOSSRAID=0
else
BOSSRAID=1
fi
else
# This doesn't have BOSS RAID, so set it to 0
BOSSRAID=0
fi
fi fi
} }
@@ -101,13 +79,14 @@ SWRAID=0
BOSSRAID=0 BOSSRAID=0
HWRAID=0 HWRAID=0
if [[ "$is_hwraid" == "true" ]]; then if [[ $is_hwraid ]]; then
check_nsm_raid check_nsm_raid
check_boss_raid
fi fi
if [[ "$is_softwareraid" == "true" ]]; then if [[ $is_bossraid ]]; then
check_boss_raid
fi
if [[ $is_swraid ]]; then
check_software_raid check_software_raid
check_boss_raid
fi fi
sum=$(($SWRAID + $BOSSRAID + $HWRAID)) sum=$(($SWRAID + $BOSSRAID + $HWRAID))
View File
@@ -334,7 +334,6 @@ desktop_packages:
- pulseaudio-libs - pulseaudio-libs
- pulseaudio-libs-glib2 - pulseaudio-libs-glib2
- pulseaudio-utils - pulseaudio-utils
- putty
- sane-airscan - sane-airscan
- sane-backends - sane-backends
- sane-backends-drivers-cameras - sane-backends-drivers-cameras
View File
@@ -51,14 +51,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-hydra':
final_octet: 30
port_bindings:
- 0.0.0.0:4444:4444
- 0.0.0.0:4445:4445
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-logstash': 'so-logstash':
final_octet: 29 final_octet: 29
port_bindings: port_bindings:
@@ -75,6 +67,13 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx': 'so-nginx':
final_octet: 31 final_octet: 31
port_bindings: port_bindings:
@@ -82,14 +81,13 @@ docker:
- 443:443 - 443:443
- 8443:8443 - 8443:8443
- 7788:7788 - 7788:7788
- 7789:7789
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-nginx-fleet-node': 'so-playbook':
final_octet: 31 final_octet: 32
port_bindings: port_bindings:
- 8443:8443 - 0.0.0.0:3000:3000
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
@@ -113,6 +111,13 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-backend': 'so-strelka-backend':
final_octet: 36 final_octet: 36
custom_bind_mounts: [] custom_bind_mounts: []
@@ -189,20 +194,8 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits:
- memlock=524288000
'so-zeek': 'so-zeek':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-kafka':
final_octet: 88
port_bindings:
- 0.0.0.0:9092:9092
- 0.0.0.0:29092:29092
- 0.0.0.0:9093:9093
- 0.0.0.0:8778:8778
custom_bind_mounts: []
extra_hosts: []
extra_env: []
View File
@@ -20,41 +20,41 @@ dockergroup:
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.6.21-1
- docker-ce: 5:27.2.0-1~debian.12~bookworm - docker-ce: 5:24.0.3-1~debian.12~bookworm
- docker-ce-cli: 5:27.2.0-1~debian.12~bookworm - docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:27.2.0-1~debian.12~bookworm - docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
- hold: True - hold: True
- update_holds: True - update_holds: True
{% elif grains.oscodename == 'jammy' %} {% elif grains.oscodename == 'jammy' %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.6.21-1
- docker-ce: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
- hold: True - hold: True
- update_holds: True - update_holds: True
{% else %} {% else %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.4.9-1
- docker-ce: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce: 5:20.10.8~3-0~ubuntu-focal
- docker-ce-cli: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %} {% endif %}
{% else %} {% else %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-3.1.el9 - containerd.io: 1.6.21-3.1.el9
- docker-ce: 3:27.2.0-1.el9 - docker-ce: 24.0.4-1.el9
- docker-ce-cli: 1:27.2.0-1.el9 - docker-ce-cli: 24.0.4-1.el9
- docker-ce-rootless-extras: 27.2.0-1.el9 - docker-ce-rootless-extras: 24.0.4-1.el9
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %} {% endif %}
View File
@@ -45,13 +45,14 @@ docker:
so-influxdb: *dockerOptions so-influxdb: *dockerOptions
so-kibana: *dockerOptions so-kibana: *dockerOptions
so-kratos: *dockerOptions so-kratos: *dockerOptions
so-hydra: *dockerOptions
so-logstash: *dockerOptions so-logstash: *dockerOptions
so-mysql: *dockerOptions
so-nginx: *dockerOptions so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions so-playbook: *dockerOptions
so-redis: *dockerOptions so-redis: *dockerOptions
so-sensoroni: *dockerOptions so-sensoroni: *dockerOptions
so-soc: *dockerOptions so-soc: *dockerOptions
so-soctopus: *dockerOptions
so-strelka-backend: *dockerOptions so-strelka-backend: *dockerOptions
so-strelka-filestream: *dockerOptions so-strelka-filestream: *dockerOptions
so-strelka-frontend: *dockerOptions so-strelka-frontend: *dockerOptions
@@ -64,42 +65,5 @@ docker:
so-elastic-agent: *dockerOptions so-elastic-agent: *dockerOptions
so-telegraf: *dockerOptions so-telegraf: *dockerOptions
so-steno: *dockerOptions so-steno: *dockerOptions
so-suricata: so-suricata: *dockerOptions
final_octet:
description: Last octet of the container IP address.
helpLink: docker.html
readonly: True
advanced: True
global: True
port_bindings:
description: List of port bindings for the container.
helpLink: docker.html
advanced: True
multiline: True
forcedType: "[]string"
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
ulimits:
description: Ulimits for the container, in bytes.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions so-zeek: *dockerOptions
so-kafka: *dockerOptions
View File
@@ -82,36 +82,6 @@ elastasomodulesync:
- group: 933 - group: 933
- makedirs: True - makedirs: True
elastacustomdir:
file.directory:
- name: /opt/so/conf/elastalert/custom
- user: 933
- group: 933
- makedirs: True
elastacustomsync:
file.recurse:
- name: /opt/so/conf/elastalert/custom
- source: salt://elastalert/files/custom
- user: 933
- group: 933
- makedirs: True
- file_mode: 660
- show_changes: False
elastapredefinedsync:
file.recurse:
- name: /opt/so/conf/elastalert/predefined
- source: salt://elastalert/files/predefined
- user: 933
- group: 933
- makedirs: True
- template: jinja
- file_mode: 660
- context:
elastalert: {{ ELASTALERTMERGED }}
- show_changes: False
elastaconf: elastaconf:
file.managed: file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml - name: /opt/so/conf/elastalert/elastalert_config.yaml

View File

@@ -1,6 +1,5 @@
elastalert: elastalert:
enabled: False enabled: False
alerter_parameters: ""
config: config:
rules_folder: /opt/elastalert/rules/ rules_folder: /opt/elastalert/rules/
scan_subdirectories: true scan_subdirectories: true

View File

@@ -30,8 +30,6 @@ so-elastalert:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
- /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
View File
@@ -1 +0,0 @@
THIS IS A PLACEHOLDER FILE
View File
@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PlaybookESAlerter(Alerter):
"""
Use matched data to create alerts in elasticsearch
"""
required_options = set(['play_title','play_url','sigma_level'])
def alert(self, matches):
for match in matches:
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'PlaybookESAlerter'}
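Note that alert() also reads play_id, rule.category, the event.* fields, and both pivot URLs, so a rule must define those even though only play_title, play_url, and sigma_level are declared required. A sketch of the document shape this produces, built from a hypothetical rule and match:

from time import gmtime, strftime

rule = {  # hypothetical rule options, shaped as the alerter expects
    "play_title": "Example Play", "play_id": "abc-123",
    "play_url": "https://manager/playbook/1", "sigma_level": "high",
    "rule.category": "detection", "event.severity": 3,
    "event.module": "playbook", "event.dataset": "alert",
    "kibana_pivot": "https://manager/kibana", "soc_pivot": "https://manager/soc",
}
match = {"message": "matched event"}
payload = {
    "tags": "alert",
    "rule": {"name": rule["play_title"], "case_template": rule["play_id"],
             "uuid": rule["play_id"], "category": rule["rule.category"]},
    "event": {"severity": rule["event.severity"], "module": rule["event.module"],
              "dataset": rule["event.dataset"], "severity_label": rule["sigma_level"]},
    "kibana_pivot": rule["kibana_pivot"], "soc_pivot": rule["soc_pivot"],
    "play_url": rule["play_url"], "sigma_level": rule["sigma_level"],
    "event_data": match, "@timestamp": strftime("%Y-%m-%dT%H:%M:%S.000Z", gmtime()),
}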
View File
@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class SecurityOnionESAlerter(Alerter):
"""
Use matched data to create alerts in Elasticsearch.
"""
required_options = set(['detection_title', 'sigma_level'])
optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']
def alert(self, matches):
for match in matches:
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
creds = None
if 'es_username' in self.rule and 'es_password' in self.rule:
creds = (self.rule['es_username'], self.rule['es_password'])
# Start building the rule dict
rule_info = {
"name": self.rule['detection_title'],
"uuid": self.rule['detection_public_id']
}
# Add optional fields if they are present in the rule
for field in self.optional_fields:
rule_key = field.split('_')[-1] # Assumes field format "sigma_<key>"
if field in self.rule:
rule_info[rule_key] = self.rule[field]
# Construct the payload with the conditional rule_info
payload = {
"tags": "alert",
"rule": rule_info,
"event": {
"severity": self.rule['event.severity'],
"module": self.rule['event.module'],
"dataset": self.rule['event.dataset'],
"severity_label": self.rule['sigma_level']
},
"sigma_level": self.rule['sigma_level'],
"event_data": match,
"@timestamp": timestamp
}
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
def get_info(self):
return {'type': 'SecurityOnionESAlerter'}
View File
@@ -1,6 +0,0 @@
{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %}
user: {{ elastalert.jira_user }}
password: {{ elastalert.jira_pass }}
{% else %}
apikey: {{ elastalert.get('jira_api_key', '') }}
{% endif %}
View File
@@ -1,2 +0,0 @@
user: {{ elastalert.get('smtp_user', '') }}
password: {{ elastalert.get('smtp_pass', '') }}
View File
@@ -13,19 +13,3 @@
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %} {% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
{% if 'ntf' in salt['pillar.get']('features', []) %}
{% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %}
{% if params != None and params | length > 0 %}
{% do ELASTALERTMERGED.config.update(params) %}
{% endif %}
{% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %}
{% endif %}
{% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %}
{% endif %}
{% endif %}
View File
@@ -1,100 +1,7 @@
elastalert: elastalert:
enabled: enabled:
description: Enables or disables the ElastAlert 2 process. This process is critical for ensuring alerts arrive in SOC, and for outbound notification delivery. description: You can enable or disable Elastalert.
helpLink: elastalert.html helpLink: elastalert.html
alerter_parameters:
title: Custom Configuration Parameters
description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: elastalert.html
forcedType: string
jira_api_key:
title: Jira API Key
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_pass:
title: Jira Password
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_user:
title: Jira Username
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
smtp_pass:
title: SMTP Password
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
smtp_user:
title: SMTP Username
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
files:
custom:
alertmanager_ca__crt:
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
gelf_ca__crt:
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/gelf_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post2_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
ms_teams_ca__crt:
description: Optional custom Certificate Authority for connecting to a Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
pagerduty_ca__crt:
description: Optional custom Certificate Authority for connecting to a PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
rocket_chat_ca__crt:
description: Optional custom Certificate Authority for connecting to a Rocket.Chat server. To utilize this custom file, the rocket_chat_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__crt:
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__key:
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
slack_ca__crt:
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
config: config:
disable_rules_on_error: disable_rules_on_error:
description: Disable rules on failure. description: Disable rules on failure.
View File
@@ -1,4 +1,4 @@
elastic_fleet_package_registry: elastic_fleet_package_registry:
enabled: enabled:
description: Enables or disables the Fleet package registry process. This process must remain enabled to allow Elastic Agent packages to be updated. description: You can enable or disable Elastic Fleet Package Registry.
advanced: True advanced: True
View File
@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- elasticagent.config - elasticagent.config
- elasticagent.sostatus - elasticagent.sostatus
View File
@@ -1,4 +0,0 @@
elasticagent:
enabled:
description: Enables or disables the Elastic Agent process. This process must remain enabled to allow collection of node events.
advanced: True
View File
@@ -30,7 +30,6 @@ elasticfleet_sbin:
- user: 947 - user: 947
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
- show_changes: False
elasticfleet_sbin_jinja: elasticfleet_sbin_jinja:
file.recurse: file.recurse:
@@ -42,7 +41,6 @@ elasticfleet_sbin_jinja:
- template: jinja - template: jinja
- exclude_pat: - exclude_pat:
- so-elastic-fleet-package-upgrade # exclude this because we need to watch it for changes - so-elastic-fleet-package-upgrade # exclude this because we need to watch it for changes
- show_changes: False
eaconfdir: eaconfdir:
file.directory: file.directory:
@@ -65,14 +63,6 @@ eastatedir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
custommappingsdir:
file.directory:
- name: /nsm/custom-mappings
- user: 947
- group: 939
- makedirs: True
eapackageupgrade: eapackageupgrade:
file.managed: file.managed:
- name: /usr/sbin/so-elastic-fleet-package-upgrade - name: /usr/sbin/so-elastic-fleet-package-upgrade
@@ -83,56 +73,6 @@ eapackageupgrade:
- template: jinja - template: jinja
{% if GLOBALS.role != "so-fleet" %} {% if GLOBALS.role != "so-fleet" %}
{% if not GLOBALS.airgap %}
soresourcesrepoclone:
git.latest:
- name: https://github.com/Security-Onion-Solutions/securityonion-resources.git
- target: /nsm/securityonion-resources
- rev: 'main'
- depth: 1
- force_reset: True
{% endif %}
elasticdefendconfdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets
- user: 947
- group: 939
- makedirs: True
elasticdefenddisabled:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml
- source: salt://elasticfleet/files/soc/elastic-defend-disabled-filters.yaml
- user: 947
- group: 939
- mode: 600
elasticdefendcustom:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw
- source: salt://elasticfleet/files/soc/elastic-defend-custom-filters.yaml
- user: 947
- group: 939
- mode: 600
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
cron-elastic-defend-filters:
cron.{{ap}}:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- identifier: elastic-defend-filters
- user: root
- minute: '0'
- hour: '3'
- daymonth: '*'
- month: '*'
- dayweek: '*'
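For reference, the cron.present state above should render roughly the following crontab entry for root; Salt tags managed entries with a SALT_CRON_IDENTIFIER comment, though exact formatting may vary by Salt version:

0 3 * * * python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log # SALT_CRON_IDENTIFIER:elastic-defend-filters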
eaintegrationsdir: eaintegrationsdir:
file.directory: file.directory:
- name: /opt/so/conf/elastic-fleet/integrations - name: /opt/so/conf/elastic-fleet/integrations
@@ -147,7 +87,6 @@ eadynamicintegration:
- user: 947 - user: 947
- group: 939 - group: 939
- template: jinja - template: jinja
- show_changes: False
eaintegration: eaintegration:
file.recurse: file.recurse:
@@ -155,7 +94,6 @@ eaintegration:
- source: salt://elasticfleet/files/integrations - source: salt://elasticfleet/files/integrations
- user: 947 - user: 947
- group: 939 - group: 939
- show_changes: False
eaoptionalintegrationsdir: eaoptionalintegrationsdir:
file.directory: file.directory:

View File

@@ -8,10 +8,6 @@ elasticfleet:
endpoints_enrollment: '' endpoints_enrollment: ''
es_token: '' es_token: ''
grid_enrollment: '' grid_enrollment: ''
defend_filters:
enable_auto_configuration: False
subscription_integrations: False
auto_upgrade_integrations: False
logging: logging:
zeek: zeek:
excluded: excluded:
@@ -34,20 +30,82 @@ elasticfleet:
- stderr - stderr
- stdout - stdout
packages: packages:
- apache
- auditd
- auth0
- aws
- azure
- barracuda
- carbonblack_edr
- checkpoint
- cisco_asa
- cisco_duo
- cisco_ftd
- cisco_ios
- cisco_ise
- cisco_meraki
- cisco_umbrella
- cloudflare
- crowdstrike
- darktrace
- elastic_agent - elastic_agent
- elasticsearch - elasticsearch
- endpoint - endpoint
- f5_bigip
- fim
- fireeye
- fleet_server - fleet_server
- fortinet
- fortinet_fortigate
- gcp
- github
- google_workspace
- http_endpoint - http_endpoint
- httpjson - httpjson
- iis
- juniper
- juniper_srx
- kafka_log
- lastpass
- log - log
- m365_defender
- microsoft_defender_endpoint
- microsoft_dhcp
- microsoft_sqlserver
- mimecast
- mysql
- netflow
- o365
- okta
- osquery_manager - osquery_manager
- panw
- pfsense
- proofpoint_tap
- pulse_connect_secure
- redis - redis
- sentinel_one
- snort
- snyk
- sonicwall_firewall
- sophos
- sophos_central
- symantec_endpoint
- system - system
- tcp - tcp
- tenable_sc
- ti_abusech
- ti_anomali
- ti_cybersixgill
- ti_misp
- ti_otx
- ti_recordedfuture
- ti_threatq
- udp - udp
- vsphere
- windows - windows
- winlog - zscaler_zia
- zscaler_zpa
- 1password
optional_integrations: optional_integrations:
sublime_platform: sublime_platform:
enabled_nodes: [] enabled_nodes: []
@@ -55,8 +113,3 @@ elasticfleet:
base_url: https://api.platform.sublimesecurity.com base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m poll_interval: 5m
limit: 100 limit: 100
kismet:
base_url: http://localhost:2501
poll_interval: 1m
api_key:
enabled_nodes: []

View File

@@ -17,21 +17,12 @@ include:
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl - ssl
{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
{% endif %}
# If enabled, automatically update Fleet Logstash Outputs # If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %} {% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs: so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update - name: /usr/sbin/so-elastic-fleet-outputs-update
- retry: - retry: True
attempts: 4
interval: 30
{% endif %} {% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection # If enabled, automatically update Fleet Server URLs & ES Connection
@@ -39,35 +30,15 @@ so-elastic-fleet-auto-configure-logstash-outputs:
so-elastic-fleet-auto-configure-server-urls: so-elastic-fleet-auto-configure-server-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update - name: /usr/sbin/so-elastic-fleet-urls-update
- retry: - retry: True
attempts: 4
interval: 30
{% endif %} {% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs # Automatically update Fleet Server Elasticsearch URLs
{% if grains.role not in ['so-fleet'] %} {% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls: so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update - name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: - retry: True
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry:
attempts: 4
interval: 30
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats
- source: salt://beats
{% endif %} {% endif %}
{% if SERVICETOKEN != '' %} {% if SERVICETOKEN != '' %}
@@ -143,26 +114,7 @@ so-elastic-fleet-integrations:
so-elastic-agent-grid-upgrade: so-elastic-agent-grid-upgrade:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade - name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry: - retry: True
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% endif %} {% endif %}
delete_so-elastic-fleet_so-status.disabled: delete_so-elastic-fleet_so-status.disabled:

View File

@@ -1,19 +0,0 @@
{
"package": {
"name": "fleet_server",
"version": ""
},
"name": "fleet_server-1",
"namespace": "default",
"policy_id": "FleetServer_hostname",
"vars": {},
"inputs": {
"fleet_server-fleet-server": {
"enabled": true,
"vars": {
"custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
},
"streams": {}
}
}
}

View File

@@ -1,46 +0,0 @@
{%- set identities = salt['sqlite3.fetch']('/nsm/kratos/db/db.sqlite', 'SELECT id, json_extract(traits, "$.email") as email FROM identities;') -%}
{%- set valid_identities = false -%}
{%- if identities -%}
{%- set valid_identities = true -%}
{%- for id, email in identities -%}
{%- if not id or not email -%}
{%- set valid_identities = false -%}
{%- break -%}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{
"package": {
"name": "log",
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"tags": ["so-kratos"],
{%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%}
"custom": "pipeline: kratos"
}
}
}
}
},
"force": true
}
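When at least one valid identity row comes back from the Kratos SQLite database, the Jinja branch above expands one equals/add_fields pair per identity inside the processors string, mapping identity_id to user.name. Unescaped, a single rendered pair looks roughly like this (the id and email below are hypothetical):

- if:
    has_fields:
      - identity_id
  then:
    - if:
        equals:
          identity_id: "3f0c9a52-6d1e-4b7a-9c0d-1a2b3c4d5e6f"
      then:
        - add_fields:
            target: ''
            fields:
              user.name: "analyst@example.com"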

View File

@@ -1,36 +0,0 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% raw %}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "kismet-logs",
"namespace": "so",
"description": "Kismet Logs",
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"data_stream.dataset": "kismet",
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
"request_method": "GET",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
"request_redirect_headers_ban_list": [],
"oauth_scopes": [],
"processors": "",
"tags": [],
"pipeline": "kismet.common"
}
}
}
}
},
"force": true
}
{% endraw %}
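With the shipped defaults for this optional integration (base_url http://localhost:2501, poll_interval 1m), the rendered vars would come out roughly as below; the API key is a hypothetical placeholder. The -600 in the URL requests devices seen as active within the last 600 seconds, matching the 10-minute window described in the poll_interval setting:

"request_url": "http://localhost:2501/devices/last-time/-600/devices.tjson",
"request_interval": "1m",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET=<your-kismet-api-key>'"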

View File

@@ -3,30 +3,25 @@
"namespace": "default", "namespace": "default",
"description": "", "description": "",
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "8.17.0", "version": "8.10.2"
"requires_root": true
}, },
"enabled": true, "enabled": true,
"policy_id": "endpoints-initial", "policy_id": "endpoints-initial",
"vars": {}, "inputs": [{
"inputs": [ "type": "ENDPOINT_INTEGRATION_CONFIG",
{
"type": "endpoint",
"enabled": true, "enabled": true,
"streams": [],
"config": { "config": {
"integration_config": { "_config": {
"value": { "value": {
"type": "endpoint", "type": "endpoint",
"endpointConfig": { "endpointConfig": {
"preset": "DataCollection" "preset": "DataCollection"
} }
}
} }
} }
}, }]
"streams": [] }
}
]
}

View File

@@ -1,29 +0,0 @@
{
"package": {
"name": "winlog",
"version": ""
},
"name": "windows-defender",
"namespace": "default",
"description": "Windows Defender - Operational logs",
"policy_id": "endpoints-initial",
"inputs": {
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlogs": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",
"data_stream.dataset": "winlog.winlog",
"preserve_original_event": false,
"providers": [],
"ignore_older": "72h",
"language": 0,
"tags": [] }
}
}
}
},
"force": true
}

View File

@@ -20,7 +20,7 @@
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"custom": "", "custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.67.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.67.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.67.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.43.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.43.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.43.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - 
add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
] ]

View File

@@ -3,9 +3,9 @@
"name": "log", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "kratos-logs",
"namespace": "so", "namespace": "so",
"description": "Hydra logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
@@ -15,12 +15,12 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "kratos",
"tags": ["so-hydra"], "tags": ["so-kratos"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
"custom": "pipeline: hydra" "custom": "pipeline: kratos"
} }
} }
} }

View File

@@ -1,34 +0,0 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/rita/beacons.csv",
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "rita",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
}
}
}
}
}
}
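Tracing the dissect tokenizer and the short JavaScript processor above: the tokenizer extracts the CSV basename into "pipeline", the script keeps the part after the first hyphen (or the whole name if there is none), and the result is prefixed with "rita.":

/nsm/rita/beacons.csv          -> @metadata.pipeline = rita.beacons
/nsm/rita/exploded-dns.csv     -> @metadata.pipeline = rita.dns
/nsm/rita/long-connections.csv -> @metadata.pipeline = rita.connections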

View File

@@ -1,35 +0,0 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/custom-mappings/ip-descriptions.csv"
],
"data_stream.dataset": "hostnamemappings",
"tags": [
"so-ip-mappings"
],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"custom": ""
}
}
}
}
},
"force": true
}
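The processors above expect a two-column CSV of IP address and description; rows whose first field fails the IPv4 regex are cancelled, and the fingerprint on so.ip_address keeps one document per address, so re-shipping a row updates it in place. A hypothetical /nsm/custom-mappings/ip-descriptions.csv:

10.10.5.1,Core firewall
192.168.50.20,Domain controller
not-an-ip,This row would be cancelled by the validation script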

View File

@@ -1,35 +0,0 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"namespace": "so",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"tags": [
"so-soc"
],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}

View File

@@ -11,7 +11,7 @@
"udp-udp": { "udp-udp": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"udp.udp": { "udp.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"listen_address": "0.0.0.0", "listen_address": "0.0.0.0",
@@ -20,13 +20,11 @@
"pipeline": "syslog", "pipeline": "syslog",
"max_message_size": "10KiB", "max_message_size": "10KiB",
"keep_null": false, "keep_null": false,
"processors": "- add_fields:\n target: event\n fields: \n module: syslog", "processors": "- add_fields:\n target: event\n fields: \n module: syslog\n",
"tags": [ "tags": [
"syslog" "syslog"
], ],
"syslog_options": "field: message\n#format: auto\n#timezone: Local\n", "syslog_options": "field: message\n#format: auto\n#timezone: Local"
"preserve_original_event": false,
"custom": ""
} }
} }
} }

View File

@@ -16,9 +16,6 @@
"paths": [ "paths": [
"/var/log/auth.log*", "/var/log/auth.log*",
"/var/log/secure*" "/var/log/secure*"
],
"tags": [
"so-grid-node"
] ]
} }
}, },
@@ -28,11 +25,7 @@
"paths": [ "paths": [
"/var/log/messages*", "/var/log/messages*",
"/var/log/syslog*" "/var/log/syslog*"
], ]
"tags": [
"so-grid-node"
],
"processors": "- if:\n contains:\n message: \"salt-minion\"\n then: \n - dissect:\n tokenizer: \"%{} %{} %{} %{} %{} %{}: [%{log.level}] %{*}\"\n field: \"message\"\n trim_values: \"all\"\n target_prefix: \"\"\n - drop_event:\n when:\n equals:\n log.level: \"INFO\""
} }
} }
} }

View File

@@ -16,9 +16,6 @@
"paths": [ "paths": [
"/var/log/auth.log*", "/var/log/auth.log*",
"/var/log/secure*" "/var/log/secure*"
],
"tags": [
"so-grid-node"
] ]
} }
}, },
@@ -28,9 +25,6 @@
"paths": [ "paths": [
"/var/log/messages*", "/var/log/messages*",
"/var/log/syslog*" "/var/log/syslog*"
],
"tags": [
"so-grid-node"
] ]
} }
} }

View File

@@ -1,27 +0,0 @@
title: 'Template 1'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'dns_query'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'QueryName'
Condition: 'end with'
Pattern: '.thawte.com'
---
title: 'Template 2'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'process_creation'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'ParentImage'
Condition: 'is'
Pattern: 'C:\Windows\Microsoft.NET\Framework\v4.0.30319\ngentask.exe'
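As a concrete sketch, a filled-in filter modeled on Template 1 might look like the following; the title, UUID, description, and pattern are hypothetical. Per the mappings in so-elastic-defend-manage-filters.py further below, event_type dns_query targets the endpoint.events.network dataset, and the 'end with' condition turns the pattern into the wildcard '*.telemetry.example.com' matched against dns.question.name.

title: 'Exclude corporate telemetry lookups'
id: '7c1d2e3f-4a5b-4c6d-8e9f-0a1b2c3d4e5f'
description: 'Excludes DNS queries for an internal telemetry domain that generates high event volume.'
references: 'Internal ticket (hypothetical)'
author: '@SecurityOnion'
date: '01/17/24'
event_type: 'dns_query'
filter_type: 'exclude'
filter:
  selection_1:
    TargetField: 'QueryName'
    Condition: 'end with'
    Pattern: '.telemetry.example.com'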

View File

@@ -1,3 +0,0 @@
'9EDAA51C-BB12-49D9-8748-2B61371F2E7D':
Date: '10/10/2024'
Notes: 'Example Disabled Filter - Leave this entry here, just copy and paste as needed.'

View File

@@ -1,133 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}
{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_INTEGRATION_DEFAULTS = {} %}
{# Some fleet integrations don't follow the standard naming convention #}
{% set WEIRD_INTEGRATIONS = {
'awsfirehose.logs': 'awsfirehose',
'awsfirehose.metrics': 'aws.cloudwatch',
'cribl.logs': 'cribl',
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
'azure_application_insights.app_insights': 'azure.app_insights',
'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance',
'azure_metrics.container_registry': 'azure.container_registry',
'azure_metrics.container_service': 'azure.container_service',
'azure_metrics.database_account': 'azure.database_account',
'azure_metrics.monitor': 'azure.monitor',
'azure_metrics.storage_account': 'azure.storage_account',
'azure_openai.metrics': 'azure.open_ai',
'beat.state': 'beats.stack_monitoring.state',
'beat.stats': 'beats.stack_monitoring.stats',
'enterprisesearch.health': 'enterprisesearch.stack_monitoring.health',
'enterprisesearch.stats': 'enterprisesearch.stack_monitoring.stats',
'kibana.cluster_actions': 'kibana.stack_monitoring.cluster_actions',
'kibana.cluster_rules': 'kibana.stack_monitoring.cluster_rules',
'kibana.node_actions': 'kibana.stack_monitoring.node_actions',
'kibana.node_rules': 'kibana.stack_monitoring.node_rules',
'kibana.stats': 'kibana.stack_monitoring.stats',
'kibana.status': 'kibana.stack_monitoring.status',
'logstash.node_cel': 'logstash.stack_monitoring.node',
'logstash.node_stats': 'logstash.stack_monitoring.node_stats',
'synthetics.browser': 'synthetics-browser',
'synthetics.browser_network': 'synthetics-browser.network',
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
'synthetics.http': 'synthetics-http',
'synthetics.icmp': 'synthetics-icmp',
'synthetics.tcp': 'synthetics-tcp'
} %}
{% for pkg in ADDON_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core integrations #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each integration #}
{% if pkg.es_index_patterns is defined and pkg.es_index_patterns is not none %}
{% for pattern in pkg.es_index_patterns %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"index_patterns": [pattern.name],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}
{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}
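To make the loop concrete: for a hypothetical installed add-on package named 1password exposing an index pattern logs-1password.item_usages-* with pattern title item_usages (assumed values, chosen to match the example in the comment above), the generated key and a few of the derived fields would be roughly:

integration_key: so-logs-1password_x_item_usages
index_template.index_patterns: ["logs-1password.item_usages-*"]
index_template.composed_of: ["logs-1password.item_usages@package", "logs-1password.item_usages@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"]
lifecycle policy name: so-logs-1password.item_usages-logs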

View File

@@ -1,6 +1,6 @@
elasticfleet: elasticfleet:
enabled: enabled:
description: Enables or disables the Elastic Fleet process. This process is critical for managing Elastic Agents. description: You can enable or disable Elastic Fleet.
advanced: True advanced: True
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
enable_manager_output: enable_manager_output:
@@ -9,24 +9,6 @@ elasticfleet:
global: True global: True
forcedType: bool forcedType: bool
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
files:
soc:
elastic-defend-disabled-filters__yaml:
title: Disabled Elastic Defend filters
description: Enter the ID of the filter that should be disabled.
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
elastic-defend-custom-filters__yaml:
title: Custom Elastic Defend filters
description: Enter custom filters separated by ---
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
logging: logging:
zeek: zeek:
excluded: excluded:
@@ -34,22 +16,6 @@ elasticfleet:
forcedType: "[]string" forcedType: "[]string"
helpLink: zeek.html helpLink: zeek.html
config: config:
defend_filters:
enable_auto_configuration:
description: Enable auto-configuration and management of the Elastic Defend Exclusion filters.
global: True
helpLink: elastic-fleet.html
advanced: True
subscription_integrations:
description: Enable the installation of integrations that require an Elastic license.
global: True
forcedType: bool
helpLink: elastic-fleet.html
auto_upgrade_integrations:
description: Enables or disables automatically upgrading Elastic Agent integrations.
global: True
forcedType: bool
helpLink: elastic-fleet.html
server: server:
custom_fqdn: custom_fqdn:
description: Custom FQDN for Agents to connect to. One per line. description: Custom FQDN for Agents to connect to. One per line.
@@ -113,29 +79,3 @@ elasticfleet:
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
advanced: True advanced: True
forcedType: int forcedType: int
kismet:
base_url:
description: Base URL for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
api_key:
description: API key for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
enabled_nodes:
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"

View File

@@ -1,251 +0,0 @@
from datetime import datetime
import sys
import getopt
from so_elastic_defend_filters_helper import *
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
# Define mappings for Target Field, Event Type, Conditions
TARGET_FIELD_MAPPINGS = {
"Image": "process.executable",
"ParentImage": "process.parent.executable",
"CommandLine": "process.command_line",
"ParentCommandLine": "process.parent.command_line",
"DestinationHostname": "destination.domain",
"QueryName": "dns.question.name",
"DestinationIp": "destination.ip",
"TargetObject": "registry.path",
"TargetFilename": "file.path"
}
DATASET_MAPPINGS = {
"process_create": "endpoint.events.process",
"network_connection": "endpoint.events.network",
"file_create": "endpoint.events.file",
"file_delete": "endpoint.events.file",
"registry_event": "endpoint.events.registry",
"dns_query": "endpoint.events.network"
}
CONDITION_MAPPINGS = {
"is": ("included", "match"),
"end with": ("included", "wildcard"),
"begin with": ("included", "wildcard"),
"contains": ("included", "wildcard")
}
# Extract entries for a rule
def extract_entries(data, event_type):
entries = []
filter_data = data.get('filter', {})
for value in filter_data.values():
target_field = TARGET_FIELD_MAPPINGS.get(value.get('TargetField', ''))
condition = value.get('Condition', '')
pattern = value.get('Pattern', '')
if condition not in CONDITION_MAPPINGS:
logging.error(f"Invalid condition: {condition}")
continue  # skip this entry instead of raising a KeyError below
# Modify the pattern based on the condition
pattern = modify_pattern(condition, pattern)
operator, match_type = CONDITION_MAPPINGS[condition]
entries.append({
"field": target_field,
"operator": operator,
"type": match_type,
"value": pattern
})
# Add the event.dataset entry from DATASET_MAPPINGS
dataset_value = DATASET_MAPPINGS.get(event_type, '')
if dataset_value:
entries.append({
"field": "event.dataset",
"operator": "included",
"type": "match",
"value": dataset_value
})
else:
logging.error(f"No dataset mapping found for event_type: {event_type}")
return entries
# Build the JSON
def build_json_entry(entries, guid, event_type, context):
return {
"comments": [],
"entries": entries,
"item_id": guid,
"name": f"SO - {event_type} - {guid}",
"description": f"{context}\n\n <<- Note: This filter is managed by Security Onion. ->>",
"namespace_type": "agnostic",
"tags": ["policy:all"],
"type": "simple",
"os_types": ["windows"],
"entries": entries
}
# Check to see if the rule is disabled
# If it is, make sure it is not active
def disable_check(guid, disabled_rules, username, password):
if guid in disabled_rules:
logging.info(f"Rule {guid} is in the disabled rules list, confirming that is is actually disabled...")
existing_rule = api_request("GET", guid, username, password)
if existing_rule:
if api_request("DELETE", guid, username, password):
logging.info(f"Successfully deleted rule {guid}")
return True, "deleted"
else:
logging.error(f"Error deleting rule {guid}.")
return True, "Error deleting"
return True, "NOP"
return False, None
def modify_pattern(condition, pattern):
"""
Modify the pattern based on the condition.
- 'end with': Add '*' to the beginning of the pattern.
- 'begin with': Add '*' to the end of the pattern.
- 'contains': Add '*' to both the beginning and end of the pattern.
"""
if isinstance(pattern, list):
# Apply modification to each pattern in the list if it's a list of patterns
return [modify_pattern(condition, p) for p in pattern]
if condition == "end with":
return f"*{pattern}"
elif condition == "begin with":
return f"{pattern}*"
elif condition == "contains":
return f"*{pattern}*"
return pattern
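# A quick sketch of the expected transformations:
#   modify_pattern("end with", ".thawte.com")  -> "*.thawte.com"
#   modify_pattern("begin with", "C:\\Temp")   -> "C:\\Temp*"
#   modify_pattern("contains", "ngentask")     -> "*ngentask*"
#   modify_pattern("is", "explorer.exe")       -> "explorer.exe" (unchanged)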
def process_rule_update_or_create(guid, json_entry, username, password):
existing_rule = api_request("GET", guid, username, password)
if existing_rule:
existing_rule_data = extract_relevant_fields(existing_rule)
new_rule_data = extract_relevant_fields(json_entry)
if generate_hash(existing_rule_data) != generate_hash(new_rule_data):
logging.info(f"Updating rule {guid}")
json_entry.pop("list_id", None)
api_request("PUT", guid, username, password, json_data=json_entry)
return "updated"
logging.info(f"Rule {guid} is up to date.")
return "no_change"
else:
logging.info(f"Creating new rule {guid}")
json_entry["list_id"] = "endpoint_event_filters"
api_request("POST", guid, username, password, json_data=json_entry)
return "new"
# Main function for processing rules
def process_rules(yaml_files, disabled_rules, username, password):
stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
for data in yaml_files:
logging.info(f"Processing rule: {data.get('id', '')}")
event_type = data.get('event_type', '')
guid = data.get('id', '')
dataset = DATASET_MAPPINGS.get(event_type, '')
context = data.get('description', '')
rule_deleted, state = disable_check(guid, disabled_rules, username, password)
if rule_deleted:
stats["disabled"] += 1
if state == "deleted":
stats["deleted"] += 1
continue
# Extract entries and build JSON
entries = extract_entries(data, event_type)
json_entry = build_json_entry(entries, guid, event_type, context)
# Process rule creation or update
status = process_rule_update_or_create(guid, json_entry, username, password)
stats[status] += 1
stats["rule_count"] += 1
return stats
def parse_args(argv):
try:
opts, args = getopt.getopt(argv, "i:d:c:f:", ["input=", "disabled=", "credentials=", "flags_file="])
except getopt.GetoptError:
print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
sys.exit(2)
return opts
def load_flags(file_path):
with open(file_path, 'r') as flags_file:
return flags_file.read().splitlines()
def validate_inputs(credentials_file, disabled_file, yaml_directories):
if not credentials_file or not disabled_file or not yaml_directories:
print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
sys.exit(2)
def main(argv):
credentials_file = ""
disabled_file = ""
yaml_directories = []
opts = parse_args(argv)
for opt, arg in opts:
if opt in ("-c", "--credentials"):
credentials_file = arg
elif opt in ("-d", "--disabled"):
disabled_file = arg
elif opt in ("-i", "--input"):
yaml_directories.append(arg)
elif opt in ("-f", "--flags_file"):
flags = load_flags(arg)
return main(argv + flags)
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logging.info(f"\n{timestamp}")
validate_inputs(credentials_file, disabled_file, yaml_directories)
credentials = load_credentials(credentials_file)
if not credentials:
raise Exception("Failed to load credentials")
username, password = extract_auth_details(credentials)
if not username or not password:
raise Exception("Invalid credentials format")
custom_rules_input = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw'
custom_rules_output = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters'
prepare_custom_rules(custom_rules_input, custom_rules_output)
disabled_rules = load_disabled(disabled_file)
total_stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
for yaml_dir in yaml_directories:
yaml_files = load_yaml_files(yaml_dir)
stats = process_rules(yaml_files, disabled_rules, username, password)
for key in total_stats:
total_stats[key] += stats[key]
logging.info(f"\nProcessing Summary")
logging.info(f" - Total processed rules: {total_stats['rule_count']}")
logging.info(f" - New rules: {total_stats['new']}")
logging.info(f" - Updated rules: {total_stats['updated']}")
logging.info(f" - Disabled rules: {total_stats['deleted']}")
logging.info(f" - Rules with no changes: {total_stats['no_change']}")
logging.info(f"Rule status Summary")
logging.info(f" - Active rules: {total_stats['rule_count'] - total_stats['disabled']}")
logging.info(f" - Disabled rules: {total_stats['disabled']}")
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logging.info(f"Execution completed at: {timestamp}")
if __name__ == "__main__":
main(sys.argv[1:])
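For reference, a manual run of this script mirrors the cron entry defined in the elasticfleet Salt state, using the paths configured there:

python3 /sbin/so-elastic-defend-manage-filters.py \
  -c /opt/so/conf/elasticsearch/curl.config \
  -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml \
  -i /nsm/securityonion-resources/event_filters/ \
  -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/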

View File

@@ -97,84 +97,11 @@ elastic_fleet_package_install() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION" curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
} }
elastic_fleet_bulk_package_install() {
BULK_PKG_LIST=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
}
elastic_fleet_package_is_installed() { elastic_fleet_package_is_installed() {
PACKAGE=$1 PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status' curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
} }
elastic_fleet_installed_packages() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
}
elastic_fleet_agent_policy_ids() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
}
elastic_fleet_agent_policy_names() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
}
elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_id() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
exit 1
fi
}
elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
if [ $? -ne 0 ]; then
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
exit 1
fi
}
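A hypothetical usage sketch for the helpers above, assuming they are sourced the same way the other so-elastic-fleet scripts do; the policy and integration names are taken from examples elsewhere in this changeset:

. /usr/sbin/so-elastic-fleet-common
# Look up the integration policy ID for kratos-logs on the general grid-nodes policy...
INTEGRATION_ID=$(elastic_fleet_integration_id "so-grid-nodes_general" "kratos-logs")
# ...then ask Fleet whether an upgrade of that integration policy would apply cleanly
elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"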
elastic_fleet_policy_create() { elastic_fleet_policy_create() {
NAME=$1 NAME=$1

View File

@@ -1,29 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-elastic-fleet-common
# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')
# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')
# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
printf "\nUpdating Policy: $POLICYNAME\n"
# First get the Integration ID
INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')
# Modify the default integration policy to update the policy_id and the name with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON
elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
done

View File

@@ -12,10 +12,7 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades # First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade /usr/sbin/so-elastic-fleet-package-upgrade
# Second, update Fleet Server policies # Second, configure Elastic Defend Integration separately
/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints # Initial Endpoints

View File

@@ -10,6 +10,6 @@
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}') SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
# List configured package policies # List configured package policies
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages?prerelease=true" -H 'kbn-xsrf: true' | jq curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages" -H 'kbn-xsrf: true' | jq
echo echo

View File

@@ -1,128 +0,0 @@
import hashlib
import os
import json
import yaml
import requests
from requests.auth import HTTPBasicAuth
import shutil
# Extract 'entries', 'description' and 'os_types' fields
def extract_relevant_fields(filter):
return {
'entries': filter.get('entries', []),
'description': filter.get('description', '')
}
# Sort for consistency, so that a hash can be generated
def sorted_data(value):
if isinstance(value, dict):
# Recursively sort the dictionary by key
return {k: sorted_data(v) for k, v in sorted(value.items())}
elif isinstance(value, list):
# Sort lists; for dictionaries, sort by a specific key
return sorted(value, key=lambda x: tuple(sorted(x.items())) if isinstance(x, dict) else x)
return value
# Generate a hash based on sorted relevant fields
def generate_hash(data):
sorted_data_string = json.dumps(sorted_data(data), sort_keys=True)
return hashlib.sha256(sorted_data_string.encode('utf-8')).hexdigest()
# Load Elasticsearch credentials from the config file
def load_credentials(config_path):
with open(config_path, 'r') as file:
for line in file:
if line.startswith("user"):
credentials = line.split('=', 1)[1].strip().strip('"')
return credentials
return None
# Extract username and password from credentials
def extract_auth_details(credentials):
if ':' in credentials:
return credentials.split(':', 1)
return None, None
# Generalized API request function
def api_request(method, guid, username, password, json_data=None):
headers = {
'kbn-xsrf': 'true',
'Content-Type': 'application/json'
}
auth = HTTPBasicAuth(username, password)
if method == "POST":
url = "http://localhost:5601/api/exception_lists/items?namespace_type=agnostic"
else:
url = f"http://localhost:5601/api/exception_lists/items?item_id={guid}&namespace_type=agnostic"
response = requests.request(method, url, headers=headers, auth=auth, json=json_data)
if response.status_code in [200, 201]:
return response.json() if response.content else True
elif response.status_code == 404 and method == "GET":
return None
else:
print(f"Error with {method} request: {response.status_code} - {response.text}")
return False
# Load YAML data for GUIDs to skip
def load_disabled(disabled_file_path):
if os.path.exists(disabled_file_path):
with open(disabled_file_path, 'r') as file:
return yaml.safe_load(file) or {}
return {}
def load_yaml_files(*dirs):
yaml_files = []
for dir_path in dirs:
if os.path.isdir(dir_path):
# Recurse through the directory and subdirectories
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if file_name.endswith(".yaml"):
full_path = os.path.join(root, file_name)
with open(full_path, 'r') as f:
try:
yaml_content = yaml.safe_load(f)
yaml_files.append(yaml_content)
except yaml.YAMLError as e:
print(f"Error loading {full_path}: {e}")
else:
print(f"Invalid directory: {dir_path}")
return yaml_files
def prepare_custom_rules(input_file, output_dir):
# Clear the output directory first
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir, exist_ok=True)
try:
# Load the YAML file
with open(input_file, 'r') as f:
docs = yaml.safe_load_all(f)
for doc in docs:
if 'id' not in doc:
print(f"Skipping rule, no 'id' found: {doc}")
continue
if doc.get('title') in ["Template 1", "Template 2"]:
print(f"Skipping template rule with title: {doc['title']}")
continue
# Create a filename using the 'id' field
file_name = os.path.join(output_dir, f"{doc['id']}.yaml")
# Write the individual YAML file
with open(file_name, 'w') as output_file:
yaml.dump(doc, output_file, default_flow_style=False)
print(f"Created file: {file_name}")
except yaml.YAMLError as e:
print(f"Error parsing YAML: {e}")
except Exception as e:
print(f"Error processing file: {e}")

View File

@@ -13,16 +13,13 @@
LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log" LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log"
# get the variables needed such as ELASTIC_AGENT_TARBALL_VERSION
get_elastic_agent_vars
# Check to see if we are already running # Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers") NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0 [ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0
for i in {1..30} for i in {1..30}
do do
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key') ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',') FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
done done
@@ -39,7 +36,6 @@ printf "\n### Creating a temp directory at /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace rm -rf /nsm/elastic-agent-workspace
mkdir -p /nsm/elastic-agent-workspace mkdir -p /nsm/elastic-agent-workspace
printf "\n### Extracting outer tarball and then each individual tarball/zip\n" printf "\n### Extracting outer tarball and then each individual tarball/zip\n"
tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-agent-workspace/ tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-agent-workspace/
unzip -q /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/ unzip -q /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/
@@ -50,7 +46,7 @@ do
done done
printf "\n### Stripping out unused components" printf "\n### Stripping out unused components"
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -delete find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
printf "\n### Tarring everything up again" printf "\n### Tarring everything up again"
for OS in "${OSARCH[@]}" for OS in "${OSARCH[@]}"
@@ -76,17 +72,5 @@ do
printf "\n### $GOOS/$GOARCH Installer Generated...\n" printf "\n### $GOOS/$GOARCH Installer Generated...\n"
done done
printf "\n\n### Generating MSI...\n" printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
cp /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64 /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
docker run \
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ -w /output \
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} wixl -o so-elastic-agent_windows_amd64_msi --arch x64 /workspace/so-elastic-agent.wxs
printf "\n### MSI Generated...\n"
printf "\n### Cleaning up temp files \n"
rm -rf /nsm/elastic-agent-workspace rm -rf /nsm/elastic-agent-workspace
rm -rf /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
printf "\n### Copying so_agent-installers to /nsm/elastic-fleet/ for nginx.\n"
\cp -vr /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/ /nsm/elastic-fleet/
chmod 644 /nsm/elastic-fleet/so_agent-installers/*

View File

@@ -1,11 +1,8 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
{%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
# Only run on Managers # Only run on Managers
if ! is_manager_node; then if ! is_manager_node; then
@@ -14,7 +11,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true") RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=policy_id%20%3A%20so-grid-nodes_%2A&showInactive=false&showUpgradeable=true&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
@@ -28,14 +25,14 @@ OUTDATED_LIST=$(jq -r '.items | map(.id) | (tojson)' <<< "$RAW_JSON")
if [ "$OUTDATED_LIST" != '[]' ]; then if [ "$OUTDATED_LIST" != '[]' ]; then
AGENTNUMBERS=$(jq -r '.total' <<< "$RAW_JSON") AGENTNUMBERS=$(jq -r '.total' <<< "$RAW_JSON")
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n" printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic $ELASTIC_AGENT_TARBALL_VERSION...\n\n"
# Generate updated JSON payload # Generate updated JSON payload
JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }') JSON_STRING=$(jq -n --arg ELASTICVERSION $ELASTIC_AGENT_TARBALL_VERSION --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
# Update Node Agents # Update Node Agents
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
else else
printf "No Agents need updates... Exiting\n\n" printf "No Agents need updates... Exiting\n\n"
exit 0 exit 0
fi fi
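URL-decoded, the two kuery variants in the curl call above read as follows; the first pins the comparison to the grid's Elasticsearch version within the general grid-nodes policy, while the second matches any so-grid-nodes_* policy and pairs with showUpgradeable=true:

NOT agent.version : "<elasticsearch.version>" and policy_id : "so-grid-nodes_general"
policy_id : so-grid-nodes_*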

View File

@@ -1,90 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
# Only run on Managers
if ! is_manager_node; then
printf "Not a Manager Node... Exiting"
exit 0
fi
# Function to check if an array contains a value
array_contains () {
local array="$1[@]"
local seeking=$2
local in=1
for element in "${!array}"; do
if [[ $element == "$seeking" ]]; then
in=0
break
fi
done
return $in
}
# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'
# Initialize an array for new hosts from Fleet Nodes
declare -a NEW_LIST=()
# Query for Fleet Nodes & add them to the list (Hostname)
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
for NODE in "${FLEETNODES[@]}"; do
URL="http://$NODE:8443/artifacts/"
NAME="FleetServer_$NODE"
NEW_LIST+=("$URL=$NAME")
done
fi
# Create an array for expected hosts and their names
declare -A expected_urls=(
["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
)
# Merge NEW_LIST into expected_urls
for entry in "${NEW_LIST[@]}"; do
# Extract URL and Name from each entry
IFS='=' read -r URL NAME <<< "$entry"
# Add to expected_urls, automatically handling URL as key and NAME as value
expected_urls["$URL"]="$NAME"
done
# Fetch the current hosts from the API
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)
# Convert current hosts to an array
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"
# Flag to track if any host was added
any_url_added=0
# Check each expected host
for host in "${!expected_urls[@]}"; do
array_contains current_urls_array "$host" || {
echo "$host (${expected_urls[$host]}) is missing. Adding it..."
# Prepare the JSON payload
JSON_STRING=$( jq -n \
--arg NAME "${expected_urls[$host]}" \
--arg URL "$host" \
'{"name":$NAME,"host":$URL}' )
# Create the missing host
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Flag that an artifact URL was added
any_url_added=1
}
done
if [[ $any_url_added -eq 0 ]]; then
echo "All expected artifact URLs are present. No updates needed."
fi
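The deleted script above is a reconcile loop: build the set of expected agent download sources, fetch the current ones from the Fleet API, and POST any that are missing. The membership test at its core can be sketched standalone; the URLs and hostnames below are hypothetical:

declare -A expected=(
  ["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
  ["http://manager.example:8443/artifacts/"]="FleetServer_manager"
)
current=("https://artifacts.elastic.co/downloads/")
for url in "${!expected[@]}"; do
  missing=1
  # Linear scan of the current list; associative array keys hold the URLs
  for c in "${current[@]}"; do
    [[ "$c" == "$url" ]] && missing=0 && break
  done
  (( missing )) && echo "$url (${expected[$url]}) is missing and would be added"
done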

View File

@@ -1,5 +1,3 @@
-#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.

View File

@@ -1,73 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
{%- set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
. /usr/sbin/so-elastic-fleet-common
curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
if [ $? -ne 0 ]; then
echo "Error: Failed to connect to Kibana."
exit 1
fi
IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
for AGENT_POLICY in $agent_policies; do
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
for INTEGRATION in $integrations; do
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
# Get package name so we know what package to look for when checking the current and latest available version
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
# Get currently installed version of package
PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")
# Get latest available version of package
AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
# Get integration ID
INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
# Dry run of the upgrade
echo ""
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
echo "Upgrading $INTEGRATION..."
echo "Starting dry run..."
DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
# If no errors with dry run, proceed with actual upgrade
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
echo "No errors detected. Proceeding with upgrade..."
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
if [ $? -ne 0 ]; then
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
exit 1
fi
else
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
exit 1
fi
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
fi
{%- endif %}
fi
done
done
echo
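The gate in the middle of that loop is worth isolating: the dry-run response is a JSON array whose elements carry a hasErrors flag, and the real upgrade only runs when every element is error-free. A self-contained sketch with canned output (the JSON below is fabricated for illustration; the original compares the printed value, while jq -e turns the same check into an exit code):

DRYRUN_OUTPUT='[{"name":"example-integration","hasErrors":false}]'
# Exit status 0 only if every element reports hasErrors == false
if jq -e 'all(.[]; .hasErrors == false)' <<< "$DRYRUN_OUTPUT" > /dev/null; then
  echo "No errors detected. Proceeding with upgrade..."
else
  echo "Errors detected during dry run... skipping upgrade"
fi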

View File

@@ -1,169 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set SUB = salt['pillar.get']('elasticfleet:config:subscription_integrations', default=false) %}
{% set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common
# Check that /opt/so/state/estemplates.txt exists to signal that Elasticsearch
# has completed its first run of core-only integrations/indices/components/ilm
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
INSTALLED_PACKAGE_LIST=/tmp/esfleet_installed_packages.json
BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
PENDING_UPDATE=false
# Integrations which are included in the package registry, but excluded from automatic installation via this script.
# Requiring some level of manual Elastic Stack configuration before installation
EXCLUDED_INTEGRATIONS=('apm')
version_conversion(){
version=$1
echo "$version" | awk -F '.' '{ printf("%d%03d%03d\n", $1, $2, $3); }'
}
compare_versions() {
version1=$1
version2=$2
# Convert versions to numbers
num1=$(version_conversion "$version1")
num2=$(version_conversion "$version2")
# Compare using bc
if (( $(echo "$num1 < $num2" | bc -l) )); then
echo "less"
elif (( $(echo "$num1 > $num2" | bc -l) )); then
echo "greater"
else
echo "equal"
fi
}
IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
in_use_integrations=()
for AGENT_POLICY in $agent_policies; do
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
for INTEGRATION in $integrations; do
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
# non-default integrations that are in-use in any policy
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
in_use_integrations+=("$PACKAGE_NAME")
fi
done
done
if [[ -f $STATE_FILE_SUCCESS ]]; then
if retry 3 1 "curl -s -K /opt/so/conf/elasticsearch/curl.config --output /dev/null --silent --head --fail localhost:5601/api/fleet/epm/packages"; then
# Package_list contains all integrations beta / non-beta.
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do
# get package details
package_name=$(echo "$package" | jq -r '.name')
latest_version=$(echo "$package" | jq -r '.latest_version')
installed_version=$(echo "$package" | jq -r '.installed_version')
subscription=$(echo "$package" | jq -r '.subscription')
bulk_package=$(echo "$package" | jq '{name: .name, version: .latest_version}' )
if [[ ! "${EXCLUDED_INTEGRATIONS[@]}" =~ "$package_name" ]]; then
{% if not SUB %}
if [[ "$subscription" != "basic" && "$subscription" != "null" && -n "$subscription" ]]; then
# pass over integrations that require non-basic elastic license
echo "$package_name integration requires an Elastic license of $subscription or greater... skipping"
continue
else
if [[ "$installed_version" == "null" || -z "$installed_version" ]]; then
echo "$package_name is not installed... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
else
results=$(compare_versions "$latest_version" "$installed_version")
if [ $results == "greater" ]; then
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
{%- endif %}
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
else
echo "skipping available upgrade for in use integration - $package_name."
fi
{%- endif %}
fi
fi
fi
{% else %}
if [[ "$installed_version" == "null" || -z "$installed_version" ]]; then
echo "$package_name is not installed... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
else
results=$(compare_versions "$latest_version" "$installed_version")
if [ $results == "greater" ]; then
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
{%- endif %}
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
else
echo "skipping available upgrade for in use integration - $package_name."
fi
{%- endif %}
fi
fi
{% endif %}
else
echo "Skipping $package_name..."
fi
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
if [ "$PENDING_UPDATE" = true ]; then
# Run bulk install of packages
elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
else
echo "Elastic integrations don't appear to need installation/updating..."
fi
# Write out file for generating index/component/ilm templates
latest_installed_package_list=$(elastic_fleet_installed_packages)
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
else
# This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
echo "Elastic Fleet does not appear to be responding... Exiting... "
exit 0
fi
else
# This message will appear when an update to core integration is made and this script is run at the same time as
# elasticsearch.enabled -> detects change to core index settings -> deletes estemplates.txt
echo "Elasticsearch may not be fully configured yet or is currently updating core index settings."
exit 0
fi
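The version_conversion/compare_versions pair above is a zero-padding trick: each dotted component is packed into a fixed-width integer, so 2.4.90 becomes 2004090 and 2.4.150 becomes 2004150, and ordinary numeric comparison then orders releases correctly as long as each component stays below 1000. A standalone sketch; plain bash arithmetic would also work here in place of bc:

version_conversion() {
  echo "$1" | awk -F '.' '{ printf("%d%03d%03d\n", $1, $2, $3); }'
}
v1=$(version_conversion "2.4.90")    # -> 2004090
v2=$(version_conversion "2.4.150")   # -> 2004150
# 10# forces base-10 so any leading zeros are not read as octal
if (( 10#$v1 < 10#$v2 )); then
  echo "2.4.90 sorts before 2.4.150"
fi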

View File

@@ -1,5 +1,3 @@
-#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
@@ -21,104 +19,64 @@ function update_logstash_outputs() {
 # Update Logstash Outputs
 curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
 }
-function update_kafka_outputs() {
-# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
-SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
-JSON_STRING=$(jq -n \
---arg UPDATEDLIST "$NEW_LIST_JSON" \
---argjson SSL_CONFIG "$SSL_CONFIG" \
-'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
-# Update Kafka outputs
-curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
-}
-{% if GLOBALS.pipeline == "KAFKA" %}
-# Get current list of Kafka Outputs
-RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
-# Check to make sure that the server responded with good data - else, bail from script
-CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
-if [ "$CHECKSUM" != "so-manager_kafka" ]; then
-printf "Failed to query for current Kafka Outputs..."
-exit 1
-fi
-# Get the current list of kafka outputs & hash them
-CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
-CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
-declare -a NEW_LIST=()
-# Query for the current Grid Nodes that are running kafka
-KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
-# Query for Kafka nodes with Broker role and add hostname to list
-while IFS= read -r line; do
-NEW_LIST+=("$line")
-done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
-{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
-{% else %}
-# Get current list of Logstash Outputs
-RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
-# Check to make sure that the server responded with good data - else, bail from script
-CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
-if [ "$CHECKSUM" != "so-manager_logstash" ]; then
-printf "Failed to query for current Logstash Outputs..."
-exit 1
-fi
-# Get the current list of Logstash outputs & hash them
-CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
-CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
-declare -a NEW_LIST=()
-{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
-{% if ELASTICFLEETMERGED.enable_manager_output %}
-# Create array & add initial elements
-if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
-NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
-else
-NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
-fi
-{% endif %}
-# Query for FQDN entries & add them to the list
-{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
-CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
-readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
-for CUSTOMNAME in "${CUSTOMFQDN[@]}"
-do
-NEW_LIST+=("$CUSTOMNAME:5055")
-done
-{% endif %}
-# Query for the current Grid Nodes that are running Logstash
-LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
-# Query for Receiver Nodes & add them to the list
-if grep -q "receiver" <<< $LOGSTASHNODES; then
-readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
-for NODE in "${RECEIVERNODES[@]}"
-do
-NEW_LIST+=("$NODE:5055")
-done
-fi
-# Query for Fleet Nodes & add them to the list
-if grep -q "fleet" <<< $LOGSTASHNODES; then
-readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
-for NODE in "${FLEETNODES[@]}"
-do
-NEW_LIST+=("$NODE:5055")
-done
-fi
-{% endif %}
+# Get current list of Logstash Outputs
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
+# Check to make sure that the server responded with good data - else, bail from script
+CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
+if [ "$CHECKSUM" != "so-manager_logstash" ]; then
+printf "Failed to query for current Logstash Outputs..."
+exit 1
+fi
+# Get the current list of Logstash outputs & hash them
+CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
+CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
+declare -a NEW_LIST=()
+{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
+{% if ELASTICFLEETMERGED.enable_manager_output %}
+# Create array & add initial elements
+if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
+NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
+else
+NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
+fi
+{% endif %}
+# Query for FQDN entries & add them to the list
+{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
+CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
+readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
+for CUSTOMNAME in "${CUSTOMFQDN[@]}"
+do
+NEW_LIST+=("$CUSTOMNAME:5055")
+done
+{% endif %}
+# Query for the current Grid Nodes that are running Logstash
+LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
+# Query for Receiver Nodes & add them to the list
+if grep -q "receiver" <<< $LOGSTASHNODES; then
+readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
+for NODE in "${RECEIVERNODES[@]}"
+do
+NEW_LIST+=("$NODE:5055")
+done
+fi
+# Query for Fleet Nodes & add them to the list
+if grep -q "fleet" <<< $LOGSTASHNODES; then
+readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
+for NODE in "${FLEETNODES[@]}"
+do
+NEW_LIST+=("$NODE:5055")
+done
+fi
 # Sort & hash the new list of Logstash Outputs
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
@@ -127,28 +85,9 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
 if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
 printf "\nHashes match - no update needed.\n"
 printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
-# Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
-printf "Checking if the correct output policy is set as default\n"
-OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
-OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
-if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
-printf "Default output policy needs to be updated.\n"
-{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
-update_kafka_outputs
-{%- else %}
-update_logstash_outputs
-{%- endif %}
-else
-printf "Default output policy is set - no update needed.\n"
-fi
 exit 0
 else
 printf "\nHashes don't match - update needed.\n"
 printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
-{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
-update_kafka_outputs
-{%- else %}
 update_logstash_outputs
-{%- endif %}
 fi
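The hash gate that survives on both sides of this diff is a cheap idempotence check: serialize the desired host list with jq, SHA-1 both lists, and only PUT when they differ. A minimal sketch with hypothetical hosts:

NEW_LIST=("receiver1:5055" "manager1:5055")
# jq's $ARGS.positional turns the bash array into a JSON array
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
CURRENT_LIST='["receiver1:5055","manager1:5055"]'
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
  echo "Hashes match - no update needed."
else
  echo "Hashes don't match - would PUT the new host list."
fi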

View File

@@ -53,8 +53,7 @@ fi
 printf "\n### Create ES Token ###\n"
 ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
-### Create Outputs, Fleet Policy and Fleet URLs ###
-# Create the Manager Elasticsearch Output first and set it as the default output
+### Create Outputs & Fleet URLs ###
 printf "\nAdd Manager Elasticsearch Output...\n"
 ESCACRT=$(openssl x509 -in $INTCA)
 JSON_STRING=$( jq -n \
@@ -63,21 +62,7 @@ JSON_STRING=$( jq -n \
 curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 printf "\n\n"
-# Create the Manager Fleet Server Host Agent Policy
-# This has to be done while the Elasticsearch Output is set to the default Output
-printf "Create Manager Fleet Server Policy...\n"
-elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
-# Modify the default integration policy to update the policy_id with the correct naming
-UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
-.policy_id = $policy_id |
-.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
-# Add the Fleet Server Integration to the new Fleet Policy
-elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
-# Now we can create the Logstash Output and set it to to be the default Output
-printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
+printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
 {% if grains.role not in ['so-import', 'so-eval'] %}
 LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
 LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
@@ -92,11 +77,6 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 printf "\n\n"
 {%- endif %}
-printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
-{% if grains.role not in ['so-import', 'so-eval'] %}
-/usr/sbin/so-kafka-fleet-output-policy
-{% endif %}
 # Add Manager Hostname & URL Base to Fleet Host URLs
 printf "\nAdd SO-Manager Fleet URL\n"
 if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
@@ -116,6 +96,16 @@ printf "\n\n"
 # Load Elasticsearch templates
 /usr/sbin/so-elasticsearch-templates-load
+# Manager Fleet Server Host
+elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"
+#Temp Fixup for ES Output bug
+JSON_STRING=$( jq -n \
+--arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
+'{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
+)
+curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 # Initial Endpoints Policy
 elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
@@ -170,4 +160,4 @@ salt-call state.apply elasticfleet queue=True
 # Generate installers & install Elastic Agent on the node
 so-elastic-agent-gen-installers
 salt-call state.apply elasticfleet.install_agent_grid queue=True
 exit 0
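The "Temp Fixup" block added near the end pins the Fleet Server agent policy to the Elasticsearch output by PUTting the policy back with an explicit data_output_id. Reduced to its essentials (the policy name below is a placeholder):

POLICY="FleetServer_manager.example"
# data_output_id keeps this policy on the Elasticsearch output even if a
# different output is later made the grid default
JSON_STRING=$(jq -n --arg NAME "$POLICY" \
  '{name: $NAME, description: $NAME, namespace: "default", monitoring_enabled: ["logs"], inactivity_timeout: 120, data_output_id: "so-manager_elasticsearch"}')
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT \
  "localhost:5601/api/fleet/agent_policies/$POLICY" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"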

View File

@@ -1,5 +1,3 @@
-#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.

View File

@@ -1,52 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
. /usr/sbin/so-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
exit 1
fi
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$output" | grep -q "so-manager_kafka"; then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
JSON_STRING=$( jq -n \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
)
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
exit 1
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
fi
elif echo "$output" | grep -q "so-manager_kafka"; then
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
echo -e "\nNo update required...\n"
{% endif %}
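One detail worth noting in the deleted Kafka setup: openssl x509 and openssl rsa re-emit just the PEM blocks, and jq --arg handles JSON-escaping those multi-line strings, so the certificates can be embedded directly in the output payload. Sketched with placeholder paths:

CRT=$(openssl x509 -in /etc/pki/example.crt)
KEY=$(openssl rsa -in /etc/pki/example.key)
# --arg binds each PEM blob as a properly escaped JSON string
JSON_STRING=$(jq -n --arg crt "$CRT" --arg key "$KEY" \
  '{ssl: {certificate: $crt, key: $key, verification_mode: "full"}}')
echo "$JSON_STRING" | jq -r '.ssl.verification_mode'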

View File

@@ -15,7 +15,7 @@
 elastic_auth_pillar:
 file.managed:
 - name: /opt/so/saltstack/local/pillar/elasticsearch/auth.sls
-- mode: 640
+- mode: 600
 - reload_pillar: True
 - contents: |
 elasticsearch:

View File

@@ -4,7 +4,7 @@
 # Elastic License 2.0.
 {% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 # Move our new CA over so Elastic and Logstash can use SSL with the internal CA

Some files were not shown because too many files have changed in this diff.