Compare commits

..

3 Commits

Author SHA1 Message Date
m0duspwnens
d91dd0dd3c watch some values 2024-04-29 17:14:00 -04:00
m0duspwnens
a0388fd568 engines config for valueWatch 2024-04-29 14:02:10 -04:00
m0duspwnens
05244cfd75 watch files change engine 2024-04-24 13:19:39 -04:00
953 changed files with 710985 additions and 217153 deletions

View File

@@ -536,11 +536,10 @@ secretGroup = 4
[allowlist] [allowlist]
description = "global allow lists" description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''', '''integration_key\s=\s"so-logs-"'''] regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
paths = [ paths = [
'''gitleaks.toml''', '''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
'''(go.mod|go.sum)$''', '''(go.mod|go.sum)$''',
'''salt/nginx/files/enterprise-attack.json''', '''salt/nginx/files/enterprise-attack.json'''
'''(.*?)whl$'''
] ]

View File

@@ -11,6 +11,7 @@ body:
description: Which version of Security Onion 2.4.x are you asking about? description: Which version of Security Onion 2.4.x are you asking about?
options: options:
- -
- 2.4 Pre-release (Beta, Release Candidate)
- 2.4.10 - 2.4.10
- 2.4.20 - 2.4.20
- 2.4.30 - 2.4.30
@@ -21,18 +22,6 @@ body:
- 2.4.80 - 2.4.80
- 2.4.90 - 2.4.90
- 2.4.100 - 2.4.100
- 2.4.110
- 2.4.111
- 2.4.120
- 2.4.130
- 2.4.140
- 2.4.141
- 2.4.150
- 2.4.160
- 2.4.170
- 2.4.180
- 2.4.190
- 2.4.200
- Other (please provide detail below) - Other (please provide detail below)
validations: validations:
required: true required: true
@@ -43,10 +32,9 @@ body:
options: options:
- -
- Security Onion ISO image - Security Onion ISO image
- Cloud image (Amazon, Azure, Google) - Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc.
- Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc. (unsupported) - Network installation on Ubuntu
- Network installation on Ubuntu (unsupported) - Network installation on Debian
- Network installation on Debian (unsupported)
- Other (please provide detail below) - Other (please provide detail below)
validations: validations:
required: true required: true

12
.github/ISSUE_TEMPLATE vendored Normal file
View File

@@ -0,0 +1,12 @@
PLEASE STOP AND READ THIS INFORMATION!
If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum instead:
https://securityonion.net/discuss
If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum to start a conversation about it instead of creating an issue.
If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provide information about your system and how you installed Security Onion
- include relevant log files
- include reproduction steps

View File

@@ -1,38 +0,0 @@
---
name: Bug report
about: This option is for experienced community members to report a confirmed, reproducible bug
title: ''
labels: ''
assignees: ''
---
PLEASE STOP AND READ THIS INFORMATION!
If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum at https://securityonion.net/discuss.
If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum at https://securityonion.net/discuss to start a conversation about it instead of creating an issue.
If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provide information about your system and how you installed Security Onion
- include relevant log files
- include reproduction steps
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

View File

@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Security Onion Discussions
url: https://securityonion.com/discussions
about: Please ask and answer questions here

View File

@@ -15,7 +15,6 @@ concurrency:
jobs: jobs:
close-threads: close-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
issues: write issues: write

View File

@@ -18,7 +18,7 @@ jobs:
with: with:
path-to-signatures: 'signatures_v1.json' path-to-signatures: 'signatures_v1.json'
path-to-document: 'https://securityonionsolutions.com/cla' path-to-document: 'https://securityonionsolutions.com/cla'
allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,defensivedepth,m0duspwnens allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,weslambert,defensivedepth,m0duspwnens
remote-organization-name: Security-Onion-Solutions remote-organization-name: Security-Onion-Solutions
remote-repository-name: licensing remote-repository-name: licensing

View File

@@ -15,7 +15,6 @@ concurrency:
jobs: jobs:
lock-threads: lock-threads:
if: github.repository_owner == 'security-onion-solutions'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: jertel/lock-threads@main - uses: jertel/lock-threads@main

View File

@@ -1,6 +1,10 @@
name: python-test name: python-test
on: on:
push:
paths:
- "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin"
pull_request: pull_request:
paths: paths:
- "salt/sensoroni/files/analyzers/**" - "salt/sensoroni/files/analyzers/**"
@@ -13,7 +17,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: ["3.13"] python-version: ["3.10"]
python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"] python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
steps: steps:
@@ -32,4 +36,4 @@ jobs:
flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
- name: Test with pytest - name: Test with pytest
run: | run: |
PYTHONPATH=${{ matrix.python-code-path }} pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini

1
.gitignore vendored
View File

@@ -1,3 +1,4 @@
# Created by https://www.gitignore.io/api/macos,windows # Created by https://www.gitignore.io/api/macos,windows
# Edit at https://www.gitignore.io/?templates=macos,windows # Edit at https://www.gitignore.io/?templates=macos,windows

View File

@@ -1,17 +1,17 @@
### 2.4.190-20251024 ISO image released on 2025/10/24 ### 2.4.60-20240320 ISO image released on 2024/03/20
### Download and Verify ### Download and Verify
2.4.190-20251024 ISO image: 2.4.60-20240320 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
MD5: 25358481FB876226499C011FC0710358 MD5: 178DD42D06B2F32F3870E0C27219821E
SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165 SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D
SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872 SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,29 +25,27 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013 gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013 Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
``` ```
If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
Once you've verified the ISO image, you're ready to proceed to our Installation guide: Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.4/installation.html https://docs.securityonion.net/en/2.4/installation.html

53
LICENSE
View File

@@ -1,53 +0,0 @@
Elastic License 2.0 (ELv2)
Acceptance
By using the software, you agree to all of the terms and conditions below.
Copyright License
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below.
Limitations
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.
You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.
You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensors trademarks is subject to applicable law.
Patents
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
Notices
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
No Other Rights
These terms do not imply any licenses other than those expressly granted in these terms.
Termination
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
No Liability
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
Definitions
The licensor is the entity offering these terms, and the software is the software the licensor makes available under these terms, including any portion of it.
you refers to the individual or entity agreeing to these terms.
your company is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. control means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
your licenses are all the licenses granted to you for the software under these terms.
use means anything you do with the software requiring one of your licenses.
trademark means trademarks, service marks, and similar rights.

View File

@@ -8,22 +8,19 @@ Alerts
![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png) ![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
Dashboards Dashboards
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png) ![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/51_dashboards.png)
Hunt Hunt
![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png) ![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/52_hunt.png)
Detections
![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png)
PCAP PCAP
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png) ![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_pcap.png)
Grid Grid
![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png) ![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_grid.png)
Config Config
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png) ![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/61_config.png)
### Release Notes ### Release Notes

View File

@@ -5,11 +5,9 @@
| Version | Supported | | Version | Supported |
| ------- | ------------------ | | ------- | ------------------ |
| 2.4.x | :white_check_mark: | | 2.4.x | :white_check_mark: |
| 2.3.x | :x: | | 2.3.x | :white_check_mark: |
| 16.04.x | :x: | | 16.04.x | :x: |
Security Onion 2.3 has reached End Of Life and is no longer supported.
Security Onion 16.04 has reached End Of Life and is no longer supported. Security Onion 16.04 has reached End Of Life and is no longer supported.
## Reporting a Vulnerability ## Reporting a Vulnerability

View File

@@ -1 +1 @@
2.4.200 2.4.70

View File

@@ -1,34 +0,0 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='elasticsearch:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
elasticsearch:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}

View File

@@ -1,34 +0,0 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='G@role:so-hypervisor or G@role:so-managerhype',
fun='network.ip_addrs',
tgt_type='compound') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
hypervisor:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}

View File

@@ -1,2 +1,30 @@
{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %}
{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}
{% set existing_ids = [] %}
{% for node in pillar_kafkanodes.values() %}
{% if node.get('id') %}
{% do existing_ids.append(node['nodeid']) %}
{% endif %}
{% endfor %}
{% set all_possible_ids = range(1, 256)|list %}
{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}
{% set final_nodes = pillar_kafkanodes.copy() %}
{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if hostname not in final_nodes %}
{% set new_id = available_ids.pop(0) %}
{% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
{% endif %}
{% endfor %}
kafka: kafka:
nodes: nodes: {{ final_nodes|tojson }}

View File

@@ -1,15 +1,16 @@
{% set node_types = {} %} {% set node_types = {} %}
{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
{% for minionid, ip in salt.saltutil.runner( {% for minionid, ip in salt.saltutil.runner(
'mine.get', 'mine.get',
tgt='logstash:enabled:true', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
fun='network.ip_addrs', fun='network.ip_addrs',
tgt_type='pillar') | dictsort() tgt_type='compound') | dictsort()
%} %}
# only add a node to the pillar if it returned an ip from the mine # only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%} {% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %} {% set hostname = cached_grains[minionid]['host'] %}
{% set node_type = minionid.split('_') | last %} {% set node_type = minionid.split('_')[1] %}
{% if node_type not in node_types.keys() %} {% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %} {% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %} {% else %}

View File

@@ -24,7 +24,6 @@
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% if node_types %}
node_data: node_data:
{% for node_type, host_values in node_types.items() %} {% for node_type, host_values in node_types.items() %}
{% for hostname, details in host_values.items() %} {% for hostname, details in host_values.items() %}
@@ -34,6 +33,3 @@ node_data:
role: {{node_type}} role: {{node_type}}
{% endfor %} {% endfor %}
{% endfor %} {% endfor %}
{% else %}
node_data: False
{% endif %}

View File

@@ -1,34 +0,0 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='redis:enabled:true',
fun='network.ip_addrs',
tgt_type='pillar') | dictsort()
%}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
redis:
nodes:
{% for node_type, values in node_types.items() %}
{{node_type}}:
{% for hostname, ip in values.items() %}
{{hostname}}:
ip: {{ip}}
{% endfor %}
{% endfor %}

View File

@@ -16,24 +16,16 @@ base:
- sensoroni.adv_sensoroni - sensoroni.adv_sensoroni
- telegraf.soc_telegraf - telegraf.soc_telegraf
- telegraf.adv_telegraf - telegraf.adv_telegraf
- versionlock.soc_versionlock
- versionlock.adv_versionlock
- soc.license
'* and not *_desktop': '* and not *_desktop':
- firewall.soc_firewall - firewall.soc_firewall
- firewall.adv_firewall - firewall.adv_firewall
- nginx.soc_nginx - nginx.soc_nginx
- nginx.adv_nginx - nginx.adv_nginx
'salt-cloud:driver:libvirt':
- match: grain
- vm.soc_vm
- vm.adv_vm
'*_manager or *_managersearch or *_managerhype':
- match: compound
- node_data.ips - node_data.ips
'*_manager or *_managersearch':
- match: compound
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth - elasticsearch.auth
{% endif %} {% endif %}
@@ -43,23 +35,22 @@ base:
- secrets - secrets
- manager.soc_manager - manager.soc_manager
- manager.adv_manager - manager.adv_manager
- idstools.soc_idstools
- idstools.adv_idstools
- logstash.nodes - logstash.nodes
- logstash.soc_logstash - logstash.soc_logstash
- logstash.adv_logstash - logstash.adv_logstash
- soc.soc_soc - soc.soc_soc
- soc.adv_soc - soc.adv_soc
- soc.license
- kibana.soc_kibana - kibana.soc_kibana
- kibana.adv_kibana - kibana.adv_kibana
- kratos.soc_kratos - kratos.soc_kratos
- kratos.adv_kratos - kratos.adv_kratos
- hydra.soc_hydra
- hydra.adv_hydra
- redis.nodes
- redis.soc_redis - redis.soc_redis
- redis.adv_redis - redis.adv_redis
- influxdb.soc_influxdb - influxdb.soc_influxdb
- influxdb.adv_influxdb - influxdb.adv_influxdb
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch - elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch - elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet - elasticfleet.soc_elasticfleet
@@ -73,9 +64,6 @@ base:
- kafka.nodes - kafka.nodes
- kafka.soc_kafka - kafka.soc_kafka
- kafka.adv_kafka - kafka.adv_kafka
- hypervisor.nodes
- hypervisor.soc_hypervisor
- hypervisor.adv_hypervisor
- stig.soc_stig - stig.soc_stig
'*_sensor': '*_sensor':
@@ -93,9 +81,9 @@ base:
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig - stig.soc_stig
- soc.license
'*_eval': '*_eval':
- node_data.ips
- secrets - secrets
- healthcheck.eval - healthcheck.eval
- elasticsearch.index_templates - elasticsearch.index_templates
@@ -106,7 +94,6 @@ base:
- kibana.secrets - kibana.secrets
{% endif %} {% endif %}
- kratos.soc_kratos - kratos.soc_kratos
- kratos.adv_kratos
- elasticsearch.soc_elasticsearch - elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch - elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet - elasticfleet.soc_elasticfleet
@@ -115,14 +102,17 @@ base:
- elastalert.adv_elastalert - elastalert.adv_elastalert
- manager.soc_manager - manager.soc_manager
- manager.adv_manager - manager.adv_manager
- idstools.soc_idstools
- idstools.adv_idstools
- soc.soc_soc - soc.soc_soc
- soc.adv_soc - soc.adv_soc
- soc.license
- kibana.soc_kibana - kibana.soc_kibana
- kibana.adv_kibana - kibana.adv_kibana
- strelka.soc_strelka - strelka.soc_strelka
- strelka.adv_strelka - strelka.adv_strelka
- hydra.soc_hydra - kratos.soc_kratos
- hydra.adv_hydra - kratos.adv_kratos
- redis.soc_redis - redis.soc_redis
- redis.adv_redis - redis.adv_redis
- influxdb.soc_influxdb - influxdb.soc_influxdb
@@ -141,7 +131,6 @@ base:
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
'*_standalone': '*_standalone':
- node_data.ips
- logstash.nodes - logstash.nodes
- logstash.soc_logstash - logstash.soc_logstash
- logstash.adv_logstash - logstash.adv_logstash
@@ -154,16 +143,14 @@ base:
{% endif %} {% endif %}
- secrets - secrets
- healthcheck.standalone - healthcheck.standalone
- idstools.soc_idstools
- idstools.adv_idstools
- kratos.soc_kratos - kratos.soc_kratos
- kratos.adv_kratos - kratos.adv_kratos
- hydra.soc_hydra
- hydra.adv_hydra
- redis.nodes
- redis.soc_redis - redis.soc_redis
- redis.adv_redis - redis.adv_redis
- influxdb.soc_influxdb - influxdb.soc_influxdb
- influxdb.adv_influxdb - influxdb.adv_influxdb
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch - elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch - elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet - elasticfleet.soc_elasticfleet
@@ -174,6 +161,7 @@ base:
- manager.adv_manager - manager.adv_manager
- soc.soc_soc - soc.soc_soc
- soc.adv_soc - soc.adv_soc
- soc.license
- kibana.soc_kibana - kibana.soc_kibana
- kibana.adv_kibana - kibana.adv_kibana
- strelka.soc_strelka - strelka.soc_strelka
@@ -227,21 +215,17 @@ base:
- logstash.nodes - logstash.nodes
- logstash.soc_logstash - logstash.soc_logstash
- logstash.adv_logstash - logstash.adv_logstash
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch - elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch - elasticsearch.adv_elasticsearch
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth - elasticsearch.auth
{% endif %} {% endif %}
- redis.nodes
- redis.soc_redis - redis.soc_redis
- redis.adv_redis - redis.adv_redis
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig - stig.soc_stig
- kafka.nodes - soc.license
- kafka.soc_kafka
- kafka.adv_kafka
'*_receiver': '*_receiver':
- logstash.nodes - logstash.nodes
@@ -256,12 +240,9 @@ base:
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- kafka.nodes - kafka.nodes
- kafka.soc_kafka - kafka.soc_kafka
- stig.soc_stig - kafka.adv_kafka
- elasticfleet.soc_elasticfleet
- elasticfleet.adv_elasticfleet
'*_import': '*_import':
- node_data.ips
- secrets - secrets
- elasticsearch.index_templates - elasticsearch.index_templates
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -271,7 +252,6 @@ base:
- kibana.secrets - kibana.secrets
{% endif %} {% endif %}
- kratos.soc_kratos - kratos.soc_kratos
- kratos.adv_kratos
- elasticsearch.soc_elasticsearch - elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch - elasticsearch.adv_elasticsearch
- elasticfleet.soc_elasticfleet - elasticfleet.soc_elasticfleet
@@ -282,12 +262,13 @@ base:
- manager.adv_manager - manager.adv_manager
- soc.soc_soc - soc.soc_soc
- soc.adv_soc - soc.adv_soc
- soc.license
- kibana.soc_kibana - kibana.soc_kibana
- kibana.adv_kibana - kibana.adv_kibana
- backup.soc_backup - backup.soc_backup
- backup.adv_backup - backup.adv_backup
- hydra.soc_hydra - kratos.soc_kratos
- hydra.adv_hydra - kratos.adv_kratos
- redis.soc_redis - redis.soc_redis
- redis.adv_redis - redis.adv_redis
- influxdb.soc_influxdb - influxdb.soc_influxdb
@@ -306,7 +287,6 @@ base:
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
'*_fleet': '*_fleet':
- node_data.ips
- backup.soc_backup - backup.soc_backup
- backup.adv_backup - backup.adv_backup
- logstash.nodes - logstash.nodes
@@ -316,15 +296,7 @@ base:
- elasticfleet.adv_elasticfleet - elasticfleet.adv_elasticfleet
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
'*_hypervisor':
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
'*_desktop': '*_desktop':
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig

12
pyci.sh
View File

@@ -15,16 +15,12 @@ TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin PATH=$PATH:/usr/local/bin
if [ ! -d .venv ]; then if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
python -m venv .venv echo "Missing dependencies. Consider running the following command:"
fi echo " python -m pip install flake8 pytest pytest-cov"
source .venv/bin/activate
if ! pip install flake8 pytest pytest-cov pyyaml; then
echo "Unable to install dependencies."
exit 1 exit 1
fi fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"

View File

@@ -1,91 +0,0 @@
#!/opt/saltstack/salt/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
"""
Salt execution module for hypervisor operations.
This module provides functions for managing hypervisor configurations,
including VM file management.
"""
import json
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = 'hypervisor'
def __virtual__():
    """
    Always register this module: hypervisor file management has no special
    load-time requirements on the minion.
    """
    return __virtualname__
def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
    """
    Remove a VM entry from a hypervisorVMs JSON file.

    The file is expected to hold a JSON list of objects with 'hostname' and
    'role' keys. The entry matching both vm_hostname and vm_role is dropped,
    the list is written back, and file ownership is reset to socore:socore
    (UID/GID 939).

    Args:
        vms_file_path (str): Path to the hypervisorVMs file.
        vm_hostname (str): Hostname of the VM to remove (without role suffix).
        vm_role (str): Role of the VM.

    Returns:
        dict: {'result': bool, 'comment': str} describing the outcome.

    CLI Example:
        salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
    """
    try:
        if not os.path.exists(vms_file_path):
            msg = f"VMs file not found: {vms_file_path}"
            log.error(msg)
            return {'result': False, 'comment': msg}

        with open(vms_file_path, 'r') as handle:
            raw = handle.read().strip()
        entries = json.loads(raw) if raw else []

        def _is_target(entry):
            # Match on both hostname and role so a same-named VM with a
            # different role is left untouched.
            return entry.get('hostname') == vm_hostname and entry.get('role') == vm_role

        remaining = [entry for entry in entries if not _is_target(entry)]

        if len(remaining) == len(entries):
            msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
            log.warning(msg)
            return {'result': False, 'comment': msg}

        with open(vms_file_path, 'w') as handle:
            json.dump(remaining, handle, indent=2)
        # Restore socore:socore ownership (939:939) after the rewrite.
        os.chown(vms_file_path, 939, 939)
        msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
        log.info(msg)
        return {'result': True, 'comment': msg}
    except json.JSONDecodeError as e:
        msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
    except Exception as e:
        msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}

View File

@@ -1,335 +0,0 @@
#!py
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
so-kvm-modify-hardware, and so-kvm-create-volume scripts.
The module offers three main capabilities:
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
3. Volume Management: Create and attach virtual storage volumes for NSM data
This module is intended to work with Security Onion's virtualization infrastructure and is typically
used in conjunction with salt-cloud for VM provisioning and management.
"""
import logging
import subprocess
import shlex
log = logging.getLogger(__name__)
__virtualname__ = 'qcow2'
def __virtual__():
    # Unconditionally register under the 'qcow2' virtual name.
    return __virtualname__
def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
    '''
    Modify the network configuration inside a QCOW2 image by invoking the
    so-qcow2-modify-network script.

    Usage:
        salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]

    Options:
        image
            Path to the QCOW2 image file that will be modified.
        interface
            Network interface name to configure (e.g., 'enp1s0').
        mode
            Network configuration mode, either 'dhcp4' or 'static4'
            (case-insensitive).
        vm_name
            Full name of the VM (hostname_role).
        ip4
            IPv4 address with CIDR notation (e.g., '192.168.1.10/24').
            Required when mode='static4'.
        gw4
            IPv4 gateway address (e.g., '192.168.1.1').
            Required when mode='static4'.
        dns4
            Comma-separated list of IPv4 DNS servers. Optional.
        search4
            DNS search domain for IPv4. Optional.

    Returns:
        dict: {'retcode', 'stdout', 'stderr'} from the script invocation.

    Raises:
        ValueError: if mode is not 'dhcp4'/'static4', or static4 is requested
            without both ip4 and gw4.

    Notes:
        - The QCOW2 image must be accessible and writable by the salt minion,
          and should not be in use by a running VM when modified.
        - Requires the so-qcow2-modify-network script to be installed.
        - All operations are logged with the 'qcow2 module:' prefix.
    '''
    argv = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]
    normalized = mode.lower()
    if normalized == 'dhcp4':
        argv.append('--dhcp4')
    elif normalized == 'static4':
        argv.append('--static4')
        # Static addressing is meaningless without both an address and a gateway.
        if not ip4 or not gw4:
            raise ValueError('Both ip4 and gw4 are required for static configuration.')
        argv += ['--ip4', ip4, '--gw4', gw4]
        if dns4:
            argv += ['--dns4', dns4]
        if search4:
            argv += ['--search4', search4]
    else:
        raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))

    quoted = ' '.join(shlex.quote(part) for part in argv)
    log.info('qcow2 module: Executing command: {}'.format(quoted))
    try:
        proc = subprocess.run(argv, capture_output=True, text=True, check=False)
        outcome = {
            'retcode': proc.returncode,
            'stdout': proc.stdout,
            'stderr': proc.stderr
        }
        if proc.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(proc.returncode, proc.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return outcome
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise
def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
    '''
    Modify a KVM virtual machine's hardware configuration by invoking the
    so-kvm-modify-hardware script.

    Usage:
        salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [start=<bool>]

    Options:
        vm_name
            Name of the virtual machine to modify.
        cpu
            Number of virtual CPUs (positive integer). Optional; the VM's
            current CPU count is retained if not specified.
        memory
            Memory to assign in MiB (positive integer). Optional; the VM's
            current memory size is retained if not specified.
        pci
            PCI hardware ID(s) to pass through (e.g., '0000:c7:00.0').
            May be a single value, a comma-separated string, or a list.
            Optional.
        start
            Start the VM after modification. Optional; defaults to False.

    Returns:
        dict: {'retcode', 'stdout', 'stderr'} from the script invocation.

    Raises:
        ValueError: if cpu or memory is supplied but is not a positive integer.

    Notes:
        - The VM must be stopped before modification unless only the start
          flag is set (enforced by the script).
        - Requires the so-kvm-modify-hardware script to be installed.
        - All operations are logged with the 'qcow2 module:' prefix.
    '''
    argv = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]
    if cpu is not None:
        if not (isinstance(cpu, int) and cpu > 0):
            raise ValueError('cpu must be a positive integer.')
        argv += ['-c', str(cpu)]
    if memory is not None:
        if not (isinstance(memory, int) and memory > 0):
            raise ValueError('memory must be a positive integer.')
        argv += ['-m', str(memory)]
    if pci:
        # Accept a single ID, a comma-separated string, or a list of IDs.
        if isinstance(pci, str):
            devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
        elif isinstance(pci, list):
            devices = pci
        else:
            devices = [pci]
        # Each device gets its own -p flag.
        for device in devices:
            argv += ['-p', str(device)]
    if start:
        argv.append('-s')

    quoted = ' '.join(shlex.quote(part) for part in argv)
    log.info('qcow2 module: Executing command: {}'.format(quoted))
    try:
        proc = subprocess.run(argv, capture_output=True, text=True, check=False)
        outcome = {
            'retcode': proc.returncode,
            'stdout': proc.stdout,
            'stderr': proc.stderr
        }
        if proc.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(proc.returncode, proc.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return outcome
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise
def create_volume_config(vm_name, size_gb, start=False):
    '''
    Create and attach a virtual storage volume to a KVM virtual machine by
    invoking the so-kvm-create-volume script.

    Usage:
        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]

    Options:
        vm_name
            Name of the virtual machine to attach the volume to.
        size_gb
            Volume size in GB (positive integer).
        start
            Start the VM after volume creation. Optional; defaults to False.

    Returns:
        dict: {'retcode', 'stdout', 'stderr'} from the script invocation.

    Raises:
        ValueError: if size_gb is not a positive integer.

    Notes:
        - The VM must be stopped before volume creation (enforced by the
          script). The volume is created as a qcow2 image and attached to the
          VM's libvirt configuration for NSM storage, as an alternative to
          disk passthrough.
        - Requires the so-kvm-create-volume script to be installed.
        - All operations are logged with the 'qcow2 module:' prefix.
    '''
    # Reject anything that is not a positive integer before shelling out.
    if not isinstance(size_gb, int) or size_gb <= 0:
        raise ValueError('size_gb must be a positive integer.')

    argv = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
    if start:
        argv.append('-S')

    quoted = ' '.join(shlex.quote(part) for part in argv)
    log.info('qcow2 module: Executing command: {}'.format(quoted))
    try:
        proc = subprocess.run(argv, capture_output=True, text=True, check=False)
        outcome = {
            'retcode': proc.returncode,
            'stdout': proc.stdout,
            'stderr': proc.stderr
        }
        if proc.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(proc.returncode, proc.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return outcome
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise

File diff suppressed because it is too large Load Diff

View File

@@ -1,179 +1,253 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #} # Elastic License 2.0.
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %} {% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %} {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %} {% set saltversion = saltversion.salt.minion.version %}
{# Define common state groups to reduce redundancy #} {# this is the list we are returning from this map file, it gets built below #}
{% set base_states = [ {% set allowed_states= [] %}
'common',
'patch.os.schedule',
'motd',
'salt.minion-check',
'sensoroni',
'salt.lasthighstate',
'salt.minion'
] %}
{% set ssl_states = [ {% if grains.saltversion | string == saltversion | string %}
{% set allowed_states= salt['grains.filter_by']({
'so-eval': [
'salt.master',
'ca',
'ssl',
'registry',
'manager',
'nginx',
'telegraf',
'influxdb',
'soc',
'kratos',
'elasticfleet',
'elastic-fleet-package-registry',
'firewall',
'idstools',
'suricata.manager',
'healthcheck',
'pcap',
'suricata',
'utility',
'schedule',
'tcpreplay',
'docker_clean'
],
'so-heavynode': [
'ssl',
'nginx',
'telegraf',
'firewall',
'pcap',
'suricata',
'healthcheck',
'elasticagent',
'schedule',
'tcpreplay',
'docker_clean'
],
'so-idh': [
'ssl',
'telegraf',
'firewall',
'idh',
'schedule',
'docker_clean'
],
'so-import': [
'salt.master',
'ca',
'ssl',
'registry',
'manager',
'nginx',
'soc',
'kratos',
'influxdb',
'telegraf',
'firewall',
'idstools',
'suricata.manager',
'pcap',
'utility',
'suricata',
'zeek',
'schedule',
'tcpreplay',
'docker_clean',
'elasticfleet',
'elastic-fleet-package-registry'
],
'so-manager': [
'salt.master',
'ca',
'ssl',
'registry',
'manager',
'nginx',
'telegraf',
'influxdb',
'soc',
'kratos',
'elasticfleet',
'elastic-fleet-package-registry',
'firewall',
'idstools',
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig',
'kafka'
],
'so-managersearch': [
'salt.master',
'ca',
'ssl',
'registry',
'nginx',
'telegraf',
'influxdb',
'soc',
'kratos',
'elastic-fleet-package-registry',
'elasticfleet',
'firewall',
'manager',
'idstools',
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig',
'kafka'
],
'so-searchnode': [
'ssl',
'nginx',
'telegraf',
'firewall',
'schedule',
'docker_clean',
'stig'
],
'so-standalone': [
'salt.master',
'ca',
'ssl',
'registry',
'manager',
'nginx',
'telegraf',
'influxdb',
'soc',
'kratos',
'elastic-fleet-package-registry',
'elasticfleet',
'firewall',
'idstools',
'suricata.manager',
'pcap',
'suricata',
'healthcheck',
'utility',
'schedule',
'tcpreplay',
'docker_clean',
'stig',
'kafka'
],
'so-sensor': [
'ssl',
'telegraf',
'firewall',
'nginx',
'pcap',
'suricata',
'healthcheck',
'schedule',
'tcpreplay',
'docker_clean',
'stig'
],
'so-fleet': [
'ssl',
'telegraf',
'firewall',
'logstash',
'nginx',
'healthcheck',
'schedule',
'elasticfleet',
'docker_clean'
],
'so-receiver': [
'ssl', 'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'schedule', 'schedule',
'docker_clean' 'docker_clean',
] %} 'kafka',
'elasticsearch.ca'
],
'so-desktop': [
'ssl',
'docker_clean',
'telegraf'
],
}, grain='role') %}
{% set manager_states = [ {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
'salt.master', {% do allowed_states.append('zeek') %}
'ca', {%- endif %}
'registry',
'manager',
'nginx',
'influxdb',
'soc',
'kratos',
'hydra',
'elasticfleet',
'elastic-fleet-package-registry',
'suricata.manager',
'utility'
] %}
{% set sensor_states = [ {% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
'pcap', {% do allowed_states.append('strelka') %}
'suricata',
'healthcheck',
'tcpreplay',
'zeek',
'strelka'
] %}
{% set kafka_states = [
'kafka'
] %}
{% set stig_states = [
'stig'
] %}
{% set elastic_stack_states = [
'elasticsearch',
'elasticsearch.auth',
'kibana',
'kibana.secrets',
'elastalert',
'logstash',
'redis'
] %}
{# Initialize the allowed_states list #}
{% set allowed_states = [] %}
{% if grains.saltversion | string == saltversion | string %}
{# Map role-specific states #}
{% set role_states = {
'so-eval': (
ssl_states +
manager_states +
sensor_states +
elastic_stack_states | reject('equalto', 'logstash') | list
),
'so-heavynode': (
ssl_states +
sensor_states +
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
),
'so-idh': (
ssl_states +
['idh']
),
'so-import': (
ssl_states +
manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
),
'so-manager': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states +
kafka_states +
elastic_stack_states
),
'so-managerhype': (
ssl_states +
manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
stig_states +
kafka_states +
elastic_stack_states
),
'so-managersearch': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states +
kafka_states +
elastic_stack_states
),
'so-searchnode': (
ssl_states +
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
stig_states
),
'so-standalone': (
ssl_states +
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
sensor_states +
stig_states +
kafka_states +
elastic_stack_states
),
'so-sensor': (
ssl_states +
sensor_states +
['nginx'] +
stig_states
),
'so-fleet': (
ssl_states +
stig_states +
['logstash', 'nginx', 'healthcheck', 'elasticfleet']
),
'so-receiver': (
ssl_states +
kafka_states +
stig_states +
['logstash', 'redis']
),
'so-hypervisor': (
ssl_states +
stig_states +
['hypervisor', 'libvirt']
),
'so-desktop': (
['ssl', 'docker_clean', 'telegraf'] +
stig_states
)
} %}
{# Get states for the current role #}
{% if grains.role in role_states %}
{% set allowed_states = role_states[grains.role] %}
{% endif %} {% endif %}
{# Add base states that apply to all roles #} {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
{% for state in base_states %} {% do allowed_states.append('elasticsearch') %}
{% do allowed_states.append(state) %}
{% endfor %}
{% endif %} {% endif %}
{# Add airgap state if needed #} {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% do allowed_states.append('elasticsearch.auth') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% do allowed_states.append('kibana') %}
{% do allowed_states.append('kibana.secrets') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('elastalert') %}
{% endif %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
{% do allowed_states.append('redis') %}
{% endif %}
{# all nodes on the right salt version can run the following states #}
{% do allowed_states.append('common') %}
{% do allowed_states.append('patch.os.schedule') %}
{% do allowed_states.append('motd') %}
{% do allowed_states.append('salt.minion-check') %}
{% do allowed_states.append('sensoroni') %}
{% do allowed_states.append('salt.lasthighstate') %}
{% endif %}
{% if ISAIRGAP %} {% if ISAIRGAP %}
{% do allowed_states.append('airgap') %} {% do allowed_states.append('airgap') %}
{% endif %} {% endif %}
{# all nodes can always run salt.minion state #}
{% do allowed_states.append('salt.minion') %}

View File

@@ -4,5 +4,4 @@ backup:
- /etc/pki - /etc/pki
- /etc/salt - /etc/salt
- /nsm/kratos - /nsm/kratos
- /nsm/hydra
destination: "/nsm/backup" destination: "/nsm/backup"

View File

@@ -11,10 +11,6 @@ TODAY=$(date '+%Y_%m_%d')
BACKUPDIR={{ DESTINATION }} BACKUPDIR={{ DESTINATION }}
BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar" BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
MAXBACKUPS=7 MAXBACKUPS=7
EXCLUSIONS=(
"--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers"
)
# Create backup dir if it does not exist # Create backup dir if it does not exist
mkdir -p /nsm/backup mkdir -p /nsm/backup
@@ -27,7 +23,7 @@ if [ ! -f $BACKUPFILE ]; then
# Loop through all paths defined in global.sls, and append them to backup file # Loop through all paths defined in global.sls, and append them to backup file
{%- for LOCATION in BACKUPLOCATIONS %} {%- for LOCATION in BACKUPLOCATIONS %}
tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }} tar -rf $BACKUPFILE {{ LOCATION }}
{%- endfor %} {%- endfor %}
fi fi

View File

@@ -1,3 +1,6 @@
mine_functions:
x509.get_pem_entries: [/etc/pki/ca.crt]
x509_signing_policies: x509_signing_policies:
filebeat: filebeat:
- minions: '*' - minions: '*'

View File

@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}
nsm_total:
grains.present:
- name: nsm_total
- value: {{ nsm_total }}
{% else %}
nsm_missing:
test.succeed_without_changes:
- name: /nsm does not exist, skipping grain assignment
{% endif %}

View File

@@ -4,7 +4,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
- common.grains
- common.packages - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %} {% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state - manager.elasticsearch # needed for elastic_curl_config state
@@ -15,11 +14,6 @@ net.core.wmem_default:
sysctl.present: sysctl.present:
- value: 26214400 - value: 26214400
# Users are not a fan of console messages
kernel.printk:
sysctl.present:
- value: "3 4 1 3"
# Remove variables.txt from /tmp - This is temp # Remove variables.txt from /tmp - This is temp
rmvariablesfile: rmvariablesfile:
file.absent: file.absent:
@@ -107,7 +101,7 @@ Etc/UTC:
timezone.system timezone.system
# Sync curl configuration for Elasticsearch authentication # Sync curl configuration for Elasticsearch authentication
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %} {% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
elastic_curl_config: elastic_curl_config:
file.managed: file.managed:
- name: /opt/so/conf/elasticsearch/curl.config - name: /opt/so/conf/elasticsearch/curl.config
@@ -129,11 +123,6 @@ common_sbin:
- user: 939 - user: 939
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
- show_changes: False
{% if GLOBALS.role == 'so-heavynode' %}
- exclude_pat:
- so-pcap-import
{% endif %}
common_sbin_jinja: common_sbin_jinja:
file.recurse: file.recurse:
@@ -143,21 +132,6 @@ common_sbin_jinja:
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
- template: jinja - template: jinja
- show_changes: False
{% if GLOBALS.role == 'so-heavynode' %}
- exclude_pat:
- so-import-pcap
{% endif %}
{% if GLOBALS.role == 'so-heavynode' %}
remove_so-pcap-import_heavynode:
file.absent:
- name: /usr/sbin/so-pcap-import
remove_so-import-pcap_heavynode:
file.absent:
- name: /usr/sbin/so-import-pcap
{% endif %}
{% if not GLOBALS.is_manager%} {% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers # prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
@@ -203,7 +177,6 @@ sostatus_log:
file.managed: file.managed:
- name: /opt/so/log/sostatus/status.log - name: /opt/so/log/sostatus/status.log
- mode: 644 - mode: 644
- replace: False
# Install sostatus check cron. This is used to populate Grid. # Install sostatus check cron. This is used to populate Grid.
so-status_check_cron: so-status_check_cron:

View File

@@ -1,6 +1,6 @@
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init {% from 'vars/globals.map.jinja' import GLOBALS %}
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
{% if grains.os_family == 'Debian' %} {% if GLOBALS.os_family == 'Debian' %}
commonpkgs: commonpkgs:
pkg.installed: pkg.installed:
- skip_suggestions: True - skip_suggestions: True
@@ -27,7 +27,6 @@ commonpkgs:
- vim - vim
- tar - tar
- unzip - unzip
- bc
{% if grains.oscodename != 'focal' %} {% if grains.oscodename != 'focal' %}
- python3-rich - python3-rich
{% endif %} {% endif %}
@@ -46,7 +45,7 @@ python-rich:
{% endif %} {% endif %}
{% endif %} {% endif %}
{% if grains.os_family == 'RedHat' %} {% if GLOBALS.os_family == 'RedHat' %}
remove_mariadb: remove_mariadb:
pkg.removed: pkg.removed:
@@ -57,7 +56,6 @@ commonpkgs:
- skip_suggestions: True - skip_suggestions: True
- pkgs: - pkgs:
- python3-dnf-plugin-versionlock - python3-dnf-plugin-versionlock
- bc
- curl - curl
- device-mapper-persistent-data - device-mapper-persistent-data
- fuse - fuse

View File

@@ -1,8 +1,3 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %} {% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
@@ -11,7 +6,6 @@
{% else %} {% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %} {% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %} {% endif %}
{% set SOVERSION = salt['file.read']('/etc/soversion').strip() %}
remove_common_soup: remove_common_soup:
file.absent: file.absent:
@@ -21,8 +15,6 @@ remove_common_so-firewall:
file.absent: file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
# This section is used to put the scripts in place in the Salt file system
# in case a state run tries to overwrite what we do in the next section.
copy_so-common_common_tools_sbin: copy_so-common_common_tools_sbin:
file.copy: file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common - name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
@@ -51,27 +43,6 @@ copy_so-firewall_manager_tools_sbin:
- force: True - force: True
- preserve: True - preserve: True
copy_so-yaml_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
copy_so-repo-sync_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- preserve: True
copy_bootstrap-salt_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/salt/scripts/bootstrap-salt.sh
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
- preserve: True
# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin: copy_so-common_sbin:
file.copy: file.copy:
- name: /usr/sbin/so-common - name: /usr/sbin/so-common
@@ -107,31 +78,6 @@ copy_so-yaml_sbin:
- force: True - force: True
- preserve: True - preserve: True
copy_so-repo-sync_sbin:
file.copy:
- name: /usr/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- force: True
- preserve: True
copy_bootstrap-salt_sbin:
file.copy:
- name: /usr/sbin/bootstrap-salt.sh
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
- force: True
- preserve: True
{# this is added in 2.4.120 to remove salt repo files pointing to saltproject.io to accomodate the move to broadcom and new bootstrap-salt script #}
{% if salt['pkg.version_cmp'](SOVERSION, '2.4.120') == -1 %}
{% set saltrepofile = '/etc/yum.repos.d/salt.repo' %}
{% if grains.os_family == 'Debian' %}
{% set saltrepofile = '/etc/apt/sources.list.d/salt.list' %}
{% endif %}
remove_saltproject_io_repo_manager:
file.absent:
- name: {{ saltrepofile }}
{% endif %}
{% else %} {% else %}
fix_23_soup_sbin: fix_23_soup_sbin:
cmd.run: cmd.run:

View File

@@ -5,13 +5,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
cat << EOF salt-call state.highstate -l info
so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
https://docs.securityonion.net/en/2.4/salt.html
EOF
salt-call state.highstate -l info queue=True

View File

@@ -8,6 +8,12 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a # Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files, # script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file. # and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
DEFAULT_SALT_DIR=/opt/so/saltstack/default DEFAULT_SALT_DIR=/opt/so/saltstack/default
DOC_BASE_URL="https://docs.securityonion.net/en/2.4" DOC_BASE_URL="https://docs.securityonion.net/en/2.4"
@@ -25,11 +31,6 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
export PATH="$PATH:/usr/sbin" export PATH="$PATH:/usr/sbin"
fi fi
# See if a proxy is set. If so use it.
if [ -f /etc/profile.d/so-proxy.sh ]; then
. /etc/profile.d/so-proxy.sh
fi
# Define a banner to separate sections # Define a banner to separate sections
banner="=========================================================================" banner="========================================================================="
@@ -99,17 +100,6 @@ add_interface_bond0() {
fi fi
} }
airgap_playbooks() {
SRC_DIR=$1
# Copy playbooks if using airgap
mkdir -p /nsm/airgap-resources
# Purge old airgap playbooks to ensure SO only uses the latest released playbooks
rm -fr /nsm/airgap-resources/playbooks
tar xf $SRC_DIR/airgap-resources/playbooks.tgz -C /nsm/airgap-resources/
chown -R socore:socore /nsm/airgap-resources/playbooks
git config --global --add safe.directory /nsm/airgap-resources/playbooks
}
check_container() { check_container() {
docker ps | grep "$1:" > /dev/null 2>&1 docker ps | grep "$1:" > /dev/null 2>&1
return $? return $?
@@ -179,81 +169,16 @@ check_salt_minion_status() {
return $status return $status
} }
# Compare es versions and return the highest version
compare_es_versions() {
# Save the original IFS
local OLD_IFS="$IFS"
IFS=.
local i ver1=($1) ver2=($2)
# Restore the original IFS
IFS="$OLD_IFS"
# Determine the maximum length between the two version arrays
local max_len=${#ver1[@]}
if [[ ${#ver2[@]} -gt $max_len ]]; then
max_len=${#ver2[@]}
fi
# Compare each segment of the versions
for ((i=0; i<max_len; i++)); do
# If a segment in ver1 or ver2 is missing, set it to 0
if [[ -z ${ver1[i]} ]]; then
ver1[i]=0
fi
if [[ -z ${ver2[i]} ]]; then
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
echo "$1"
return 0
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
echo "$2"
return 0
fi
done
echo "$1" # If versions are equal, return either
return 0
}
copy_new_files() { copy_new_files() {
# Define files to exclude from deletion (relative to their respective base directories)
local EXCLUDE_FILES=(
"salt/hypervisor/soc_hypervisor.yaml"
)
# Build rsync exclude arguments
local EXCLUDE_ARGS=()
for file in "${EXCLUDE_FILES[@]}"; do
EXCLUDE_ARGS+=(--exclude="$file")
done
# Copy new files over to the salt dir # Copy new files over to the salt dir
cd $UPDATE_DIR cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}" rsync -a salt $DEFAULT_SALT_DIR/ --delete
rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}" rsync -a pillar $DEFAULT_SALT_DIR/ --delete
chown -R socore:socore $DEFAULT_SALT_DIR/ chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp cd /tmp
} }
create_local_directories() {
echo "Creating local pillar and salt directories if needed"
PILLARSALTDIR=$1
local_salt_dir="/opt/so/saltstack/local"
for i in "pillar" "salt"; do
for d in $(find $PILLARSALTDIR/$i -type d); do
suffixdir=${d//$PILLARSALTDIR/}
if [ ! -d "$local_salt_dir/$suffixdir" ]; then
mkdir -p $local_salt_dir$suffixdir
fi
done
chown -R socore:socore $local_salt_dir/$i
done
}
disable_fastestmirror() { disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
} }
@@ -318,36 +243,19 @@ fail() {
exit 1 exit 1
} }
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}' | sed 's/,//')
[[ -z "$AGENTCOUNT" ]] && AGENTCOUNT="0"
else
AGENTCOUNT=0
fi
}
get_elastic_agent_vars() {
local path="${1:-/opt/so/saltstack/default}"
local defaultsfile="${path}/salt/elasticsearch/defaults.yaml"
if [ -f "$defaultsfile" ]; then
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
else
fail "Could not find salt/elasticsearch/defaults.yaml"
fi
}
get_random_value() { get_random_value() {
length=${1:-20} length=${1:-20}
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1 head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
} }
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() { gpg_rpm_import() {
if [[ $is_oracle ]]; then if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -451,7 +359,8 @@ lookup_grain() {
lookup_role() { lookup_role() {
id=$(lookup_grain id) id=$(lookup_grain id)
echo "${id##*_}" pieces=($(echo $id | tr '_' ' '))
echo ${pieces[1]}
} }
is_feature_enabled() { is_feature_enabled() {
@@ -698,8 +607,6 @@ has_uppercase() {
} }
update_elastic_agent() { update_elastic_agent() {
local path="${1:-/opt/so/saltstack/default}"
get_elastic_agent_vars "$path"
echo "Checking if Elastic Agent update is necessary..." echo "Checking if Elastic Agent update is necessary..."
download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR" download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR"
} }

View File

@@ -45,7 +45,7 @@ def check_for_fps():
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE) result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0: if result.returncode == 0:
fps = 1 fps = 1
except: except FileNotFoundError:
fn = '/proc/sys/crypto/' + feat_full + '_enabled' fn = '/proc/sys/crypto/' + feat_full + '_enabled'
try: try:
with open(fn, 'r') as f: with open(fn, 'r') as f:

View File

@@ -4,16 +4,22 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
import sys, argparse, re, subprocess, json
import sys, argparse, re, docker
from packaging.version import Version, InvalidVersion from packaging.version import Version, InvalidVersion
from itertools import groupby, chain from itertools import groupby, chain
def get_image_name(string) -> str: def get_image_name(string) -> str:
return ':'.join(string.split(':')[:-1]) return ':'.join(string.split(':')[:-1])
def get_so_image_basename(string) -> str: def get_so_image_basename(string) -> str:
return get_image_name(string).split('/so-')[-1] return get_image_name(string).split('/so-')[-1]
def get_image_version(string) -> str: def get_image_version(string) -> str:
ver = string.split(':')[-1] ver = string.split(':')[-1]
if ver == 'latest': if ver == 'latest':
@@ -29,49 +35,33 @@ def get_image_version(string) -> str:
return '999999.9.9' return '999999.9.9'
return ver return ver
def run_command(command):
process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if process.returncode != 0:
print(f"Error executing command: {command}", file=sys.stderr)
print(f"Error message: {process.stderr}", file=sys.stderr)
exit(1)
return process.stdout
def main(quiet): def main(quiet):
try: client = docker.from_env()
# Prune old/stopped containers using docker CLI
# Prune old/stopped containers
if not quiet: print('Pruning old containers') if not quiet: print('Pruning old containers')
run_command('docker container prune -f') client.containers.prune()
# Get list of images using docker CLI image_list = client.images.list(filters={ 'dangling': False })
images_json = run_command('docker images --format "{{json .}}"')
# Parse the JSON output # Map list of image objects to flattened list of tags (format: "name:version")
image_list = [] tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
for line in images_json.strip().split('\n'):
if line: # Skip empty lines
image_list.append(json.loads(line))
# Extract tags in the format "name:version"
tag_list = []
for img in image_list:
# Skip dangling images
if img.get('Repository') != "<none>" and img.get('Tag') != "<none>":
tag = f"{img.get('Repository')}:{img.get('Tag')}"
# Filter to only SO images (base name begins with "so-") # Filter to only SO images (base name begins with "so-")
if re.match(r'^.*\/so-[^\/]*$', get_image_name(tag)): tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
tag_list.append(tag)
# Group tags into lists by base name (sort by same projection first) # Group tags into lists by base name (sort by same projection first)
tag_list.sort(key=lambda x: get_so_image_basename(x)) tag_list.sort(key=lambda x: get_so_image_basename(x))
grouped_tag_lists = [list(it) for k, it in groupby(tag_list, lambda x: get_so_image_basename(x))] grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
no_prunable = True no_prunable = True
for t_list in grouped_tag_lists: for t_list in grouped_tag_lists:
try: try:
# Group tags by version, in case multiple images exist with the same version string # Group tags by version, in case multiple images exist with the same version string
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True) t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
grouped_t_list = [list(it) for k, it in groupby(t_list, lambda x: get_image_version(x))] grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
# Keep the 2 most current version groups # Keep the 2 most current version groups
if len(grouped_t_list) <= 2: if len(grouped_t_list) <= 2:
continue continue
@@ -81,10 +71,10 @@ def main(quiet):
for tag in group: for tag in group:
if not quiet: print(f'Removing image {tag}') if not quiet: print(f'Removing image {tag}')
try: try:
run_command(f'docker rmi -f {tag}') client.images.remove(tag, force=True)
except Exception as e: except docker.errors.ClientError as e:
print(f'Could not remove image {tag}, continuing...') print(f'Could not remove image {tag}, continuing...')
except (InvalidVersion) as e: except (docker.errors.APIError, InvalidVersion) as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr) print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1) exit(1)
except Exception as e: except Exception as e:
@@ -95,9 +85,6 @@ def main(quiet):
if no_prunable and not quiet: if no_prunable and not quiet:
print('No Security Onion images to prune') print('No Security Onion images to prune')
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
exit(1)
if __name__ == "__main__": if __name__ == "__main__":
main_parser = argparse.ArgumentParser(add_help=False) main_parser = argparse.ArgumentParser(add_help=False)

View File

@@ -25,10 +25,10 @@ container_list() {
if [ $MANAGERCHECK == 'so-import' ]; then if [ $MANAGERCHECK == 'so-import' ]; then
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-elasticsearch" "so-elasticsearch"
"so-idstools"
"so-influxdb" "so-influxdb"
"so-kibana" "so-kibana"
"so-kratos" "so-kratos"
"so-hydra"
"so-nginx" "so-nginx"
"so-pcaptools" "so-pcaptools"
"so-soc" "so-soc"
@@ -48,11 +48,11 @@ container_list() {
"so-elastic-fleet-package-registry" "so-elastic-fleet-package-registry"
"so-elasticsearch" "so-elasticsearch"
"so-idh" "so-idh"
"so-idstools"
"so-influxdb" "so-influxdb"
"so-kafka" "so-kafka"
"so-kibana" "so-kibana"
"so-kratos" "so-kratos"
"so-hydra"
"so-logstash" "so-logstash"
"so-nginx" "so-nginx"
"so-pcaptools" "so-pcaptools"
@@ -60,6 +60,8 @@ container_list() {
"so-soc" "so-soc"
"so-steno" "so-steno"
"so-strelka-backend" "so-strelka-backend"
"so-strelka-filestream"
"so-strelka-frontend"
"so-strelka-manager" "so-strelka-manager"
"so-suricata" "so-suricata"
"so-telegraf" "so-telegraf"
@@ -67,6 +69,7 @@ container_list() {
) )
else else
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-idstools"
"so-elasticsearch" "so-elasticsearch"
"so-logstash" "so-logstash"
"so-nginx" "so-nginx"
@@ -109,10 +112,6 @@ update_docker_containers() {
container_list container_list
fi fi
# all the images using ELASTICSEARCHDEFAULTS.elasticsearch.version
# does not include so-elastic-fleet since that container uses so-elastic-agent image
local IMAGES_USING_ES_VERSION=("so-elasticsearch")
rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1 rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1 mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
@@ -140,36 +139,15 @@ update_docker_containers() {
$PROGRESS_CALLBACK $i $PROGRESS_CALLBACK $i
fi fi
if [[ " ${IMAGES_USING_ES_VERSION[*]} " =~ [[:space:]]${i}[[:space:]] ]]; then
# this is an es container so use version defined in elasticsearch defaults.yaml
local UPDATE_DIR='/tmp/sogh/securityonion'
if [ ! -d "$UPDATE_DIR" ]; then
UPDATE_DIR=/securityonion
fi
local v1=0
local v2=0
if [[ -f "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v1=$(egrep " +version: " "$UPDATE_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
if [[ -f "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" ]]; then
v2=$(egrep " +version: " "$DEFAULT_SALT_DIR/salt/elasticsearch/defaults.yaml" | awk -F: '{print $2}' | tr -d '[:space:]')
fi
local highest_es_version=$(compare_es_versions "$v1" "$v2")
local image=$i:$highest_es_version$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/es-$highest_es_version/$image.sig
else
# this is not an es container so use the so version for the version
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
local sig_url=https://sigs.securityonion.net/$VERSION/$image.sig
fi
# Pull down the trusted docker image # Pull down the trusted docker image
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
run_check_net_err \ run_check_net_err \
"docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \ "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1 "Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
# Get signature # Get signature
run_check_net_err \ run_check_net_err \
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' $sig_url --output $SIGNPATH/$image.sig" \ "curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" \
"Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \ "Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \
noretry >> "$LOG_FILE" 2>&1 noretry >> "$LOG_FILE" 2>&1
# Dump our hash values # Dump our hash values

View File

@@ -95,8 +95,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -125,10 +123,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|No shard available" # Typical error when making a query before ES has finished loading all indices
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -153,13 +147,6 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|request_unauthorized" # false positive (login failures to Hydra result in an 'error' log)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -183,7 +170,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -215,14 +201,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp. EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error running query:" # Specific issues with detection rules
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
fi fi
RESULT=0 RESULT=0
@@ -258,24 +236,12 @@ exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check.
# Include Zeek reporter.log to detect errors after running known good pcap(s) through sensor
echo "/nsm/zeek/spool/logger/reporter.log" >> /tmp/log_check_files
for log_file in $(cat /tmp/log_check_files); do for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file" status "Checking log file $log_file"
tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
check_for_errors check_for_errors
done done
# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
if [[ -f /var/log/messages ]]; then
status "Checking log file /var/log/messages"
if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
RESULT=1
fi
fi
# Cleanup temp files # Cleanup temp files
rm -f /tmp/log_check_files rm -f /tmp/log_check_files

View File

@@ -1,98 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0."
set -e
# This script is intended to be used in the case the ISO install did not properly setup TPM decrypt for LUKS partitions at boot.
if [ -z $NOROOT ]; then
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
fi
ENROLL_TPM=N
while [[ $# -gt 0 ]]; do
case $1 in
--enroll-tpm)
ENROLL_TPM=Y
;;
*)
echo "Usage: $0 [options]"
echo ""
echo "where options are:"
echo " --enroll-tpm for when TPM enrollment was not selected during ISO install."
echo ""
exit 1
;;
esac
shift
done
check_for_tpm() {
echo -n "Checking for TPM: "
if [ -d /sys/class/tpm/tpm0 ]; then
echo -e "tpm0 found."
TPM="yes"
# Check if TPM is using sha1 or sha256
if [ -d /sys/class/tpm/tpm0/pcr-sha1 ]; then
echo -e "TPM is using sha1.\n"
TPM_PCR="sha1"
elif [ -d /sys/class/tpm/tpm0/pcr-sha256 ]; then
echo -e "TPM is using sha256.\n"
TPM_PCR="sha256"
fi
else
echo -e "No TPM found.\n"
exit 1
fi
}
check_for_luks_partitions() {
echo "Checking for LUKS partitions"
for part in $(lsblk -o NAME,FSTYPE -ln | grep crypto_LUKS | awk '{print $1}'); do
echo "Found LUKS partition: $part"
LUKS_PARTITIONS+=("$part")
done
if [ ${#LUKS_PARTITIONS[@]} -eq 0 ]; then
echo -e "No LUKS partitions found.\n"
exit 1
fi
echo ""
}
enroll_tpm_in_luks() {
read -s -p "Enter the LUKS passphrase used during ISO install: " LUKS_PASSPHRASE
echo ""
for part in "${LUKS_PARTITIONS[@]}"; do
echo "Enrolling TPM for LUKS device: /dev/$part"
if [ "$TPM_PCR" == "sha1" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha1","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
elif [ "$TPM_PCR" == "sha256" ]; then
clevis luks bind -d /dev/$part tpm2 '{"pcr_bank":"sha256","pcr_ids":"7"}' <<< $LUKS_PASSPHRASE
fi
done
}
regenerate_tpm_enrollment_token() {
for part in "${LUKS_PARTITIONS[@]}"; do
clevis luks regen -d /dev/$part -s 1 -q
done
}
check_for_tpm
check_for_luks_partitions
if [[ $ENROLL_TPM == "Y" ]]; then
enroll_tpm_in_luks
else
regenerate_tpm_enrollment_token
fi
echo "Running dracut"
dracut -fv
echo -e "\nTPM configuration complete. Reboot the system to verify the TPM is correctly decrypting the LUKS partition(s) at boot.\n"

View File

@@ -10,7 +10,7 @@
. /usr/sbin/so-common . /usr/sbin/so-common
. /usr/sbin/so-image-common . /usr/sbin/so-image-common
REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"} REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
REPLAYSPEED=${REPLAYSPEED:-10} REPLAYSPEED=${REPLAYSPEED:-10}
mkdir -p /opt/so/samples mkdir -p /opt/so/samples

View File

@@ -1,53 +0,0 @@
#!/usr/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import logging
import os
import sys
def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
"""
Sets up logging for a script.
Parameters:
logger_name (str): The name of the logger.
log_file_path (str): The file path for the log file.
log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
format_str (str): The format string for log messages.
Returns:
logging.Logger: Configured logger object.
"""
logger = logging.getLogger(logger_name)
logger.setLevel(log_level)
# Create directory for log file if it doesn't exist
log_file_dir = os.path.dirname(log_file_path)
if log_file_dir and not os.path.exists(log_file_dir):
try:
os.makedirs(log_file_dir)
except OSError as e:
print(f"Error creating directory {log_file_dir}: {e}")
sys.exit(1)
# Create handlers
c_handler = logging.StreamHandler()
f_handler = logging.FileHandler(log_file_path)
c_handler.setLevel(log_level)
f_handler.setLevel(log_level)
# Create formatter and add it to handlers
formatter = logging.Formatter(format_str)
c_handler.setFormatter(formatter)
f_handler.setFormatter(formatter)
# Add handlers to the logger if they are not already added
if not logger.hasHandlers():
logger.addHandler(c_handler)
logger.addHandler(f_handler)
return logger

View File

@@ -63,7 +63,7 @@ function status {
function pcapinfo() { function pcapinfo() {
PCAP=$1 PCAP=$1
ARGS=$2 ARGS=$2
docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -ae $ARGS docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
} }
function pcapfix() { function pcapfix() {
@@ -89,7 +89,6 @@ function suricata() {
-v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \ -v ${NSM_PATH}/:/nsm/:rw \
-v "$PCAP:/input.pcap:ro" \ -v "$PCAP:/input.pcap:ro" \
-v /dev/null:/nsm/suripcap:rw \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
@@ -173,7 +172,7 @@ for PCAP in $INPUT_FILES; do
status "- assigning unique identifier to import: $HASH" status "- assigning unique identifier to import: $HASH"
pcap_data=$(pcapinfo "${PCAP}") pcap_data=$(pcapinfo "${PCAP}")
if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
status "- this PCAP file is invalid; skipping" status "- this PCAP file is invalid; skipping"
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1)) INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
else else
@@ -205,8 +204,8 @@ for PCAP in $INPUT_FILES; do
HASHES="${HASHES} ${HASH}" HASHES="${HASHES} ${HASH}"
fi fi
START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}') START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}') END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
status "- found PCAP data spanning dates $START through $END" status "- found PCAP data spanning dates $START through $END"
# compare $START to $START_OLDEST # compare $START to $START_OLDEST
@@ -248,7 +247,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g') START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
status "Import complete!" status "Import complete!"
status status

View File

@@ -9,9 +9,6 @@
. /usr/sbin/so-common . /usr/sbin/so-common
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
{%- if salt['grains.get']('sosmodel', '') %} {%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %} {%- set model = salt['grains.get']('sosmodel') %}
model={{ model }} model={{ model }}
@@ -19,35 +16,25 @@ model={{ model }}
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
exit 0 exit 0
fi fi
for i in "${software_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=true
is_hwraid=false
break
fi
done
for i in "${hardware_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=false
is_hwraid=true
break
fi
done
{%- else %} {%- else %}
echo "This is not an appliance" echo "This is not an appliance"
exit 0 exit 0
{%- endif %} {%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
is_hwraid=true
fi
check_nsm_raid() { check_nsm_raid() {
PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl) PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
MEGACTL=$(/opt/raidtools/megasasctl |grep optimal) MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
#This doesn't have raid if [[ $APPLIANCE == '1' ]]; then
HWRAID=0
else
if [[ -n $PERCCLI ]]; then if [[ -n $PERCCLI ]]; then
HWRAID=0 HWRAID=0
elif [[ -n $MEGACTL ]]; then elif [[ -n $MEGACTL ]]; then
@@ -55,6 +42,7 @@ check_nsm_raid() {
else else
HWRAID=1 HWRAID=1
fi fi
fi fi
} }
@@ -62,16 +50,7 @@ check_nsm_raid() {
check_boss_raid() { check_boss_raid() {
MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional) MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter") MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)
# Is this NVMe Boss Raid?
if [[ "$model" =~ "-DE02" ]]; then
if [[ -n $BOSSNVMECLI ]]; then
BOSSRAID=0
else
BOSSRAID=1
fi
else
# Check to see if this is a SM based system # Check to see if this is a SM based system
if [[ -z $MVTEST ]]; then if [[ -z $MVTEST ]]; then
if [[ -n $MVCLI ]]; then if [[ -n $MVCLI ]]; then
@@ -83,7 +62,6 @@ check_boss_raid() {
# This doesn't have boss raid so lets make it 0 # This doesn't have boss raid so lets make it 0
BOSSRAID=0 BOSSRAID=0
fi fi
fi
} }
check_software_raid() { check_software_raid() {
@@ -101,13 +79,14 @@ SWRAID=0
BOSSRAID=0 BOSSRAID=0
HWRAID=0 HWRAID=0
if [[ "$is_hwraid" == "true" ]]; then if [[ $is_hwraid ]]; then
check_nsm_raid check_nsm_raid
fi
if [[ $is_bossraid ]]; then
check_boss_raid check_boss_raid
fi fi
if [[ "$is_softwareraid" == "true" ]]; then if [[ $is_swraid ]]; then
check_software_raid check_software_raid
check_boss_raid
fi fi
sum=$(($SWRAID + $BOSSRAID + $HWRAID)) sum=$(($SWRAID + $BOSSRAID + $HWRAID))

View File

@@ -1,132 +0,0 @@
#!/opt/saltstack/salt/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
{% if 'vrt' in salt['pillar.get']('features', []) -%}
"""
Script for emitting VM deployment status events to the Salt event bus.
This script provides functionality to emit status events for VM deployment operations,
used by various Security Onion VM management tools.
Usage:
so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>
Arguments:
-v, --vm-name Name of the VM (hostname_role)
-H, --hypervisor Name of the hypervisor
-s, --status Current deployment status of the VM
Example:
so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
"""
import sys
import argparse
import logging
import salt.client
from typing import Dict, Any
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
log = logging.getLogger(__name__)
def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
    """
    Publish a VM deployment status event onto the Salt event bus.

    Args:
        vm_name: Name of the VM (hostname_role)
        hypervisor: Name of the hypervisor
        status: Current deployment status of the VM

    Returns:
        bool: True when the event was accepted by the bus, False otherwise
    """
    log.info("Attempting to emit deployment event...")
    payload = {
        'vm_name': vm_name,
        'hypervisor': hypervisor,
        'status': status
    }
    # Keep the tag structure consistent across hypervisor events.
    tag = f'soc/dyanno/hypervisor/{status.lower()}'
    try:
        local_caller = salt.client.Caller()
        result = local_caller.cmd(
            'event.send',
            tag,
            payload
        )
    except Exception as e:
        log.error("Error emitting VM deployment status event: %s", str(e))
        return False
    if result:
        log.info("Successfully emitted VM deployment status event: %s", payload)
        return True
    log.error("Failed to emit VM deployment status event: %s", payload)
    return False
def parse_args():
    """Parse and return the command line arguments for this tool."""
    arg_parser = argparse.ArgumentParser(
        description='Emit VM deployment status events to the Salt event bus.'
    )
    # (short flag, long flag, help text) — all three arguments are mandatory.
    required_flags = [
        ('-v', '--vm-name', 'Name of the VM (hostname_role)'),
        ('-H', '--hypervisor', 'Name of the hypervisor'),
        ('-s', '--status', 'Current deployment status of the VM'),
    ]
    for short_flag, long_flag, help_text in required_flags:
        arg_parser.add_argument(short_flag, long_flag, required=True, help=help_text)
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI arguments and emit the status event, exiting 1 on failure."""
    try:
        cli_args = parse_args()
        if not emit_event(
            vm_name=cli_args.vm_name,
            hypervisor=cli_args.hypervisor,
            status=cli_args.status
        ):
            sys.exit(1)
    except Exception as e:
        log.error("Failed to emit status event: %s", str(e))
        sys.exit(1)
if __name__ == '__main__':
main()
{%- else -%}
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."
{% endif -%}

View File

@@ -24,6 +24,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-idstools':
final_octet: 25
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-influxdb': 'so-influxdb':
final_octet: 26 final_octet: 26
port_bindings: port_bindings:
@@ -46,14 +51,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-hydra':
final_octet: 30
port_bindings:
- 0.0.0.0:4444:4444
- 0.0.0.0:4445:4445
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-logstash': 'so-logstash':
final_octet: 29 final_octet: 29
port_bindings: port_bindings:
@@ -77,7 +74,6 @@ docker:
- 443:443 - 443:443
- 8443:8443 - 8443:8443
- 7788:7788 - 7788:7788
- 7789:7789
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
@@ -184,8 +180,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits:
- memlock=524288000
'so-zeek': 'so-zeek':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
@@ -195,9 +189,7 @@ docker:
final_octet: 88 final_octet: 88
port_bindings: port_bindings:
- 0.0.0.0:9092:9092 - 0.0.0.0:9092:9092
- 0.0.0.0:29092:29092
- 0.0.0.0:9093:9093 - 0.0.0.0:9093:9093
- 0.0.0.0:8778:8778
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []

View File

@@ -20,30 +20,30 @@ dockergroup:
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.6.21-1
- docker-ce: 5:27.2.0-1~debian.12~bookworm - docker-ce: 5:24.0.3-1~debian.12~bookworm
- docker-ce-cli: 5:27.2.0-1~debian.12~bookworm - docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:27.2.0-1~debian.12~bookworm - docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
- hold: True - hold: True
- update_holds: True - update_holds: True
{% elif grains.oscodename == 'jammy' %} {% elif grains.oscodename == 'jammy' %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.6.21-1
- docker-ce: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.22.04~jammy - docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
- hold: True - hold: True
- update_holds: True - update_holds: True
{% else %} {% else %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-1 - containerd.io: 1.4.9-1
- docker-ce: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce: 5:20.10.8~3-0~ubuntu-focal
- docker-ce-cli: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.20.04~focal - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %} {% endif %}
@@ -51,10 +51,10 @@ dockerheldpackages:
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 1.7.21-3.1.el9 - containerd.io: 1.6.21-3.1.el9
- docker-ce: 3:27.2.0-1.el9 - docker-ce: 24.0.4-1.el9
- docker-ce-cli: 1:27.2.0-1.el9 - docker-ce-cli: 24.0.4-1.el9
- docker-ce-rootless-extras: 27.2.0-1.el9 - docker-ce-rootless-extras: 24.0.4-1.el9
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %} {% endif %}

View File

@@ -41,10 +41,10 @@ docker:
forcedType: "[]string" forcedType: "[]string"
so-elastic-fleet: *dockerOptions so-elastic-fleet: *dockerOptions
so-elasticsearch: *dockerOptions so-elasticsearch: *dockerOptions
so-idstools: *dockerOptions
so-influxdb: *dockerOptions so-influxdb: *dockerOptions
so-kibana: *dockerOptions so-kibana: *dockerOptions
so-kratos: *dockerOptions so-kratos: *dockerOptions
so-hydra: *dockerOptions
so-logstash: *dockerOptions so-logstash: *dockerOptions
so-nginx: *dockerOptions so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions so-nginx-fleet-node: *dockerOptions
@@ -63,42 +63,6 @@ docker:
so-elastic-agent: *dockerOptions so-elastic-agent: *dockerOptions
so-telegraf: *dockerOptions so-telegraf: *dockerOptions
so-steno: *dockerOptions so-steno: *dockerOptions
so-suricata: so-suricata: *dockerOptions
final_octet:
description: Last octet of the container IP address.
helpLink: docker.html
readonly: True
advanced: True
global: True
port_bindings:
description: List of port bindings for the container.
helpLink: docker.html
advanced: True
multiline: True
forcedType: "[]string"
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
ulimits:
description: Ulimits for the container, in bytes.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions so-zeek: *dockerOptions
so-kafka: *dockerOptions so-kafka: *dockerOptions

View File

@@ -82,36 +82,6 @@ elastasomodulesync:
- group: 933 - group: 933
- makedirs: True - makedirs: True
elastacustomdir:
file.directory:
- name: /opt/so/conf/elastalert/custom
- user: 933
- group: 933
- makedirs: True
elastacustomsync:
file.recurse:
- name: /opt/so/conf/elastalert/custom
- source: salt://elastalert/files/custom
- user: 933
- group: 933
- makedirs: True
- file_mode: 660
- show_changes: False
elastapredefinedsync:
file.recurse:
- name: /opt/so/conf/elastalert/predefined
- source: salt://elastalert/files/predefined
- user: 933
- group: 933
- makedirs: True
- template: jinja
- file_mode: 660
- context:
elastalert: {{ ELASTALERTMERGED }}
- show_changes: False
elastaconf: elastaconf:
file.managed: file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml - name: /opt/so/conf/elastalert/elastalert_config.yaml

View File

@@ -1,6 +1,5 @@
elastalert: elastalert:
enabled: False enabled: False
alerter_parameters: ""
config: config:
rules_folder: /opt/elastalert/rules/ rules_folder: /opt/elastalert/rules/
scan_subdirectories: true scan_subdirectories: true

View File

@@ -30,8 +30,6 @@ so-elastalert:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
- /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}

View File

@@ -1 +0,0 @@
THIS IS A PLACEHOLDER FILE

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PlaybookESAlerter(Alerter):
    """
    ElastAlert alerter that indexes Playbook alert documents directly into
    Elasticsearch (index logs-playbook.alerts-so), one document per match.
    """
    # Rule options ElastAlert validates before loading this alerter.
    # NOTE(review): alert() also reads play_id, rule.category, event.severity,
    # event.module, event.dataset, kibana_pivot and soc_pivot from the rule;
    # if those are absent the alerter raises KeyError at alert time — consider
    # adding them here. Confirm against the rule templates before changing.
    required_options = set(['play_title', 'play_url', 'sigma_level'])

    def alert(self, matches):
        """Index one alert document into Elasticsearch for each match."""
        for match in matches:
            # UTC event time in the format the index mapping expects.
            timestamp = strftime("%Y-%m-%dT%H:%M:%S.000Z", gmtime())
            headers = {"Content-Type": "application/json"}
            # Basic-auth credentials are optional; only used when the rule
            # provides both a username and a password.
            creds = None
            if 'es_username' in self.rule and 'es_password' in self.rule:
                creds = (self.rule['es_username'], self.rule['es_password'])
            payload = {
                "tags": "alert",
                "rule": {
                    "name": self.rule['play_title'],
                    "case_template": self.rule['play_id'],
                    "uuid": self.rule['play_id'],
                    "category": self.rule['rule.category']
                },
                "event": {
                    "severity": self.rule['event.severity'],
                    "module": self.rule['event.module'],
                    "dataset": self.rule['event.dataset'],
                    "severity_label": self.rule['sigma_level']
                },
                "kibana_pivot": self.rule['kibana_pivot'],
                "soc_pivot": self.rule['soc_pivot'],
                "play_url": self.rule['play_url'],
                "sigma_level": self.rule['sigma_level'],
                "event_data": match,
                "@timestamp": timestamp
            }
            url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
            # NOTE(review): TLS verification is deliberately disabled
            # (matches the urllib3 warning suppression above); presumably the
            # cluster uses internal/self-signed certificates — confirm.
            requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)

    def get_info(self):
        """Identify this alerter type to ElastAlert."""
        return {'type': 'PlaybookESAlerter'}

View File

@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class SecurityOnionESAlerter(Alerter):
    """
    ElastAlert alerter that writes detection alerts into Elasticsearch.
    """
    required_options = set(['detection_title', 'sigma_level'])
    optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']

    def alert(self, matches):
        """Index one alert document into logs-detections.alerts-so per match."""
        for match in matches:
            doc_time = strftime("%Y-%m-%dT%H:%M:%S.000Z", gmtime())
            http_headers = {"Content-Type": "application/json"}
            # Basic-auth credentials only when the rule supplies both parts.
            has_creds = 'es_username' in self.rule and 'es_password' in self.rule
            creds = (self.rule['es_username'], self.rule['es_password']) if has_creds else None
            # Mandatory rule metadata, extended with whichever optional
            # sigma_* fields the rule defines (keyed by the "sigma_" suffix).
            rule_doc = {
                "name": self.rule['detection_title'],
                "uuid": self.rule['detection_public_id']
            }
            rule_doc.update({
                field.split('_')[-1]: self.rule[field]
                for field in self.optional_fields
                if field in self.rule
            })
            doc = {
                "tags": "alert",
                "rule": rule_doc,
                "event": {
                    "severity": self.rule['event.severity'],
                    "module": self.rule['event.module'],
                    "dataset": self.rule['event.dataset'],
                    "severity_label": self.rule['sigma_level']
                },
                "sigma_level": self.rule['sigma_level'],
                "event_data": match,
                "@timestamp": doc_time
            }
            target = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-detections.alerts-so/_doc/"
            requests.post(target, data=json.dumps(doc), headers=http_headers, verify=False, auth=creds)

    def get_info(self):
        """Report this alerter's type back to ElastAlert."""
        return {'type': 'SecurityOnionESAlerter'}

View File

@@ -1,6 +0,0 @@
{% if elastalert.get('jira_user', '') | length > 0 and elastalert.get('jira_pass', '') | length > 0 %}
user: {{ elastalert.jira_user }}
password: {{ elastalert.jira_pass }}
{% else %}
apikey: {{ elastalert.get('jira_api_key', '') }}
{% endif %}

View File

@@ -1,2 +0,0 @@
user: {{ elastalert.get('smtp_user', '') }}
password: {{ elastalert.get('smtp_pass', '') }}

View File

@@ -13,19 +13,3 @@
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %} {% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %} {% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
{% if 'ntf' in salt['pillar.get']('features', []) %}
{% set params = ELASTALERTMERGED.get('alerter_parameters', '') | load_yaml %}
{% if params != None and params | length > 0 %}
{% do ELASTALERTMERGED.config.update(params) %}
{% endif %}
{% if ELASTALERTMERGED.get('smtp_user', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'smtp_auth_file': '/opt/elastalert/predefined/smtp_auth.yaml'}) %}
{% endif %}
{% if ELASTALERTMERGED.get('jira_user', '') | length > 0 or ELASTALERTMERGED.get('jira_key', '') | length > 0 %}
{% do ELASTALERTMERGED.config.update({'jira_account_file': '/opt/elastalert/predefined/jira_auth.yaml'}) %}
{% endif %}
{% endif %}

View File

@@ -1,99 +1,6 @@
elastalert: elastalert:
enabled: enabled:
description: Enables or disables the ElastAlert 2 process. This process is critical for ensuring alerts arrive in SOC, and for outbound notification delivery. description: You can enable or disable Elastalert.
helpLink: elastalert.html
alerter_parameters:
title: Custom Configuration Parameters
description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: elastalert.html
forcedType: string
jira_api_key:
title: Jira API Key
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_pass:
title: Jira Password
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
jira_user:
title: Jira Username
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
smtp_pass:
title: SMTP Password
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
sensitive: True
helpLink: elastalert.html
forcedType: string
smtp_user:
title: SMTP Username
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
global: True
helpLink: elastalert.html
forcedType: string
files:
custom:
alertmanager_ca__crt:
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
gelf_ca__crt:
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
http_post2_ca__crt:
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
ms_teams_ca__crt:
description: Optional custom Certificate Authority for connecting to Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
pagerduty_ca__crt:
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
rocket_chat_ca__crt:
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the rocket_chart_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__crt:
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
smtp__key:
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html
slack_ca__crt:
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
global: True
file: True
helpLink: elastalert.html helpLink: elastalert.html
config: config:
disable_rules_on_error: disable_rules_on_error:

View File

@@ -1,4 +1,4 @@
elastic_fleet_package_registry: elastic_fleet_package_registry:
enabled: enabled:
description: Enables or disables the Fleet package registry process. This process must remain enabled to allow Elastic Agent packages to be updated. description: You can enable or disable Elastic Fleet Package Registry.
advanced: True advanced: True

View File

@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- elasticagent.config - elasticagent.config
- elasticagent.sostatus - elasticagent.sostatus

View File

@@ -1,4 +0,0 @@
elasticagent:
enabled:
description: Enables or disables the Elastic Agent process. This process must remain enabled to allow collection of node events.
advanced: True

View File

@@ -9,6 +9,3 @@ fleetartifactdir:
- user: 947 - user: 947
- group: 939 - group: 939
- makedirs: True - makedirs: True
- recurse:
- user
- group

View File

@@ -9,9 +9,6 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} {% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% set node_data = salt['pillar.get']('node_data') %} {% set node_data = salt['pillar.get']('node_data') %}
include:
- elasticfleet.artifact_registry
# Add EA Group # Add EA Group
elasticfleetgroup: elasticfleetgroup:
group.present: group.present:
@@ -33,7 +30,6 @@ elasticfleet_sbin:
- user: 947 - user: 947
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
- show_changes: False
elasticfleet_sbin_jinja: elasticfleet_sbin_jinja:
file.recurse: file.recurse:
@@ -45,7 +41,6 @@ elasticfleet_sbin_jinja:
- template: jinja - template: jinja
- exclude_pat: - exclude_pat:
- so-elastic-fleet-package-upgrade # exclude this because we need to watch it for changes - so-elastic-fleet-package-upgrade # exclude this because we need to watch it for changes
- show_changes: False
eaconfdir: eaconfdir:
file.directory: file.directory:
@@ -68,14 +63,6 @@ eastatedir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
custommappingsdir:
file.directory:
- name: /nsm/custom-mappings
- user: 947
- group: 939
- makedirs: True
eapackageupgrade: eapackageupgrade:
file.managed: file.managed:
- name: /usr/sbin/so-elastic-fleet-package-upgrade - name: /usr/sbin/so-elastic-fleet-package-upgrade
@@ -86,56 +73,6 @@ eapackageupgrade:
- template: jinja - template: jinja
{% if GLOBALS.role != "so-fleet" %} {% if GLOBALS.role != "so-fleet" %}
{% if not GLOBALS.airgap %}
soresourcesrepoclone:
git.latest:
- name: https://github.com/Security-Onion-Solutions/securityonion-resources.git
- target: /nsm/securityonion-resources
- rev: 'main'
- depth: 1
- force_reset: True
{% endif %}
elasticdefendconfdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets
- user: 947
- group: 939
- makedirs: True
elasticdefenddisabled:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml
- source: salt://elasticfleet/files/soc/elastic-defend-disabled-filters.yaml
- user: 947
- group: 939
- mode: 600
elasticdefendcustom:
file.managed:
- name: /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw
- source: salt://elasticfleet/files/soc/elastic-defend-custom-filters.yaml
- user: 947
- group: 939
- mode: 600
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
cron-elastic-defend-filters:
cron.{{ap}}:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- identifier: elastic-defend-filters
- user: root
- minute: '0'
- hour: '3'
- daymonth: '*'
- month: '*'
- dayweek: '*'
eaintegrationsdir: eaintegrationsdir:
file.directory: file.directory:
- name: /opt/so/conf/elastic-fleet/integrations - name: /opt/so/conf/elastic-fleet/integrations
@@ -150,7 +87,6 @@ eadynamicintegration:
- user: 947 - user: 947
- group: 939 - group: 939
- template: jinja - template: jinja
- show_changes: False
eaintegration: eaintegration:
file.recurse: file.recurse:
@@ -158,7 +94,6 @@ eaintegration:
- source: salt://elasticfleet/files/integrations - source: salt://elasticfleet/files/integrations
- user: 947 - user: 947
- group: 939 - group: 939
- show_changes: False
eaoptionalintegrationsdir: eaoptionalintegrationsdir:
file.directory: file.directory:
@@ -169,7 +104,7 @@ eaoptionalintegrationsdir:
{% for minion in node_data %} {% for minion in node_data %}
{% set role = node_data[minion]["role"] %} {% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %} {% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %} {% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
{% set integration_keys = optional_integrations.keys() %} {% set integration_keys = optional_integrations.keys() %}
fleet_server_integrations_{{ minion }}: fleet_server_integrations_{{ minion }}:

View File

@@ -8,10 +8,6 @@ elasticfleet:
endpoints_enrollment: '' endpoints_enrollment: ''
es_token: '' es_token: ''
grid_enrollment: '' grid_enrollment: ''
defend_filters:
enable_auto_configuration: False
subscription_integrations: False
auto_upgrade_integrations: False
logging: logging:
zeek: zeek:
excluded: excluded:
@@ -34,21 +30,87 @@ elasticfleet:
- stderr - stderr
- stdout - stdout
packages: packages:
- apache
- auditd
- auth0
- aws
- azure
- barracuda
- carbonblack_edr
- checkpoint
- cisco_asa
- cisco_duo
- cisco_ftd
- cisco_ios
- cisco_ise
- cisco_meraki
- cisco_umbrella
- citrix_adc
- citrix_waf
- cloudflare
- crowdstrike
- darktrace
- elastic_agent - elastic_agent
- elasticsearch - elasticsearch
- endpoint - endpoint
- f5_bigip
- fim
- fireeye
- fleet_server - fleet_server
- filestream - fortinet
- fortinet_fortigate
- gcp
- github
- google_workspace
- http_endpoint - http_endpoint
- httpjson - httpjson
- iis
- journald
- juniper
- juniper_srx
- kafka_log
- lastpass
- log - log
- m365_defender
- microsoft_defender_endpoint
- microsoft_dhcp
- microsoft_sqlserver
- mimecast
- mysql
- netflow
- nginx
- o365
- okta
- osquery_manager - osquery_manager
- panw
- pfsense
- proofpoint_tap
- pulse_connect_secure
- redis - redis
- sentinel_one
- snort
- snyk
- sonicwall_firewall
- sophos
- sophos_central
- symantec_endpoint
- system - system
- tcp - tcp
- tenable_sc
- ti_abusech
- ti_anomali
- ti_cybersixgill
- ti_misp
- ti_otx
- ti_recordedfuture
- ti_threatq
- udp - udp
- vsphere
- windows - windows
- winlog - winlog
- zscaler_zia
- zscaler_zpa
- 1password
optional_integrations: optional_integrations:
sublime_platform: sublime_platform:
enabled_nodes: [] enabled_nodes: []
@@ -56,8 +118,3 @@ elasticfleet:
base_url: https://api.platform.sublimesecurity.com base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m poll_interval: 5m
limit: 100 limit: 100
kismet:
base_url: http://localhost:2501
poll_interval: 1m
api_key:
enabled_nodes: []

View File

@@ -17,21 +17,17 @@ include:
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl - ssl
{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet: wait_for_elasticsearch_elasticfleet:
cmd.run: cmd.run:
- name: so-elasticsearch-wait - name: so-elasticsearch-wait
{% endif %}
# If enabled, automatically update Fleet Logstash Outputs # If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %} {% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs: so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update - name: /usr/sbin/so-elastic-fleet-outputs-update
- retry: - retry: True
attempts: 4
interval: 30
{% endif %} {% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection # If enabled, automatically update Fleet Server URLs & ES Connection
@@ -39,9 +35,7 @@ so-elastic-fleet-auto-configure-logstash-outputs:
so-elastic-fleet-auto-configure-server-urls: so-elastic-fleet-auto-configure-server-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update - name: /usr/sbin/so-elastic-fleet-urls-update
- retry: - retry: True
attempts: 4
interval: 30
{% endif %} {% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs # Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
@@ -49,16 +43,12 @@ so-elastic-fleet-auto-configure-server-urls:
so-elastic-fleet-auto-configure-elasticsearch-urls: so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update - name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: - retry: True
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls: so-elastic-fleet-auto-configure-artifact-urls:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: - retry: True
attempts: 4
interval: 30
{% endif %} {% endif %}
@@ -67,8 +57,6 @@ so-elastic-fleet-auto-configure-artifact-urls:
elasticagent_syncartifacts: elasticagent_syncartifacts:
file.recurse: file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats - name: /nsm/elastic-fleet/artifacts/beats
- user: 947
- group: 947
- source: salt://beats - source: salt://beats
{% endif %} {% endif %}
@@ -135,46 +123,17 @@ so-elastic-fleet-package-statefile:
so-elastic-fleet-package-upgrade: so-elastic-fleet-package-upgrade:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade - name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges: - onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt - file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations: so-elastic-fleet-integrations:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load - name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade: so-elastic-agent-grid-upgrade:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade - name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry: - retry: True
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% endif %} {% endif %}
delete_so-elastic-fleet_so-status.disabled: delete_so-elastic-fleet_so-status.disabled:

View File

@@ -1,19 +0,0 @@
{
"package": {
"name": "fleet_server",
"version": ""
},
"name": "fleet_server-1",
"namespace": "default",
"policy_id": "FleetServer_hostname",
"vars": {},
"inputs": {
"fleet_server-fleet-server": {
"enabled": true,
"vars": {
"custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
},
"streams": {}
}
}
}

View File

@@ -1,46 +0,0 @@
{%- set identities = salt['sqlite3.fetch']('/nsm/kratos/db/db.sqlite', 'SELECT id, json_extract(traits, "$.email") as email FROM identities;') -%}
{%- set valid_identities = false -%}
{%- if identities -%}
{%- set valid_identities = true -%}
{%- for id, email in identities -%}
{%- if not id or not email -%}
{%- set valid_identities = false -%}
{%- break -%}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{
"package": {
"name": "log",
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes_general",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"tags": ["so-kratos"],
{%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%}
"custom": "pipeline: kratos"
}
}
}
}
},
"force": true
}

View File

@@ -1,36 +0,0 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% raw %}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "kismet-logs",
"namespace": "so",
"description": "Kismet Logs",
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"data_stream.dataset": "kismet",
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
"request_method": "GET",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
"request_redirect_headers_ban_list": [],
"oauth_scopes": [],
"processors": "",
"tags": [],
"pipeline": "kismet.common"
}
}
}
}
},
"force": true
}
{% endraw %}

View File

@@ -5,18 +5,14 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "8.18.1", "version": "8.10.2"
"requires_root": true
}, },
"enabled": true, "enabled": true,
"policy_ids": [ "policy_id": "endpoints-initial",
"endpoints-initial" "inputs": [{
],
"vars": {},
"inputs": [
{
"type": "ENDPOINT_INTEGRATION_CONFIG", "type": "ENDPOINT_INTEGRATION_CONFIG",
"enabled": true, "enabled": true,
"streams": [],
"config": { "config": {
"_config": { "_config": {
"value": { "value": {
@@ -26,8 +22,6 @@
} }
} }
} }
},
"streams": []
} }
] }]
} }

View File

@@ -11,7 +11,7 @@
"winlogs-winlog": { "winlogs-winlog": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"winlog.winlogs": { "winlog.winlog": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational", "channel": "Microsoft-Windows-Windows Defender/Operational",

View File

@@ -1,48 +0,0 @@
{
"package": {
"name": "filestream",
"version": ""
},
"name": "agent-monitor",
"namespace": "",
"description": "",
"policy_ids": [
"so-grid-nodes_general"
],
"output_id": null,
"vars": {},
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/agents/agent-monitor.log"
],
"data_stream.dataset": "agentmonitor",
"pipeline": "elasticagent.monitor",
"parsers": "",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": true,
"fingerprint_offset": 0,
"fingerprint_length": 64,
"file_identity_native": false,
"exclude_lines": [],
"include_lines": []
}
}
}
}
}
}

View File

@@ -40,7 +40,7 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/elasticsearch/*.json" "/opt/so/log/elasticsearch/*.log"
] ]
} }
}, },

View File

@@ -19,7 +19,7 @@
], ],
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"tags": [], "tags": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary", "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"custom": "pipeline: common" "custom": "pipeline: common"
} }
} }

View File

@@ -20,7 +20,7 @@
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"custom": "", "custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: 
\"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.43.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.43.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.43.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
] ]

View File

@@ -3,9 +3,9 @@
"name": "log", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "kratos-logs",
"namespace": "so", "namespace": "so",
"description": "Hydra logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"logs-logfile": { "logs-logfile": {
@@ -15,12 +15,12 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "kratos",
"tags": ["so-hydra"], "tags": ["so-kratos"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
"custom": "pipeline: hydra" "custom": "pipeline: kratos"
} }
} }
} }

View File

@@ -1,35 +0,0 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/custom-mappings/ip-descriptions.csv"
],
"data_stream.dataset": "hostnamemappings",
"tags": [
"so-ip-mappings"
],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"custom": ""
}
}
}
}
},
"force": true
}

View File

@@ -1,35 +0,0 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"namespace": "so",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"tags": [
"so-soc"
],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}

View File

@@ -11,7 +11,7 @@
"tcp-tcp": { "tcp-tcp": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"tcp.tcp": { "tcp.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"listen_address": "0.0.0.0", "listen_address": "0.0.0.0",
@@ -23,8 +23,7 @@
"syslog" "syslog"
], ],
"syslog_options": "field: message\n#format: auto\n#timezone: Local", "syslog_options": "field: message\n#format: auto\n#timezone: Local",
"ssl": "", "ssl": ""
"custom": ""
} }
} }
} }

View File

@@ -11,7 +11,7 @@
"udp-udp": { "udp-udp": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"udp.udp": { "udp.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"listen_address": "0.0.0.0", "listen_address": "0.0.0.0",
@@ -20,13 +20,11 @@
"pipeline": "syslog", "pipeline": "syslog",
"max_message_size": "10KiB", "max_message_size": "10KiB",
"keep_null": false, "keep_null": false,
"processors": "- add_fields:\n target: event\n fields: \n module: syslog", "processors": "- add_fields:\n target: event\n fields: \n module: syslog\n",
"tags": [ "tags": [
"syslog" "syslog"
], ],
"syslog_options": "field: message\n#format: auto\n#timezone: Local\n", "syslog_options": "field: message\n#format: auto\n#timezone: Local"
"preserve_original_event": false,
"custom": ""
} }
} }
} }

View File

@@ -31,8 +31,7 @@
], ],
"tags": [ "tags": [
"so-grid-node" "so-grid-node"
], ]
"processors": "- if:\n contains:\n message: \"salt-minion\"\n then: \n - dissect:\n tokenizer: \"%{} %{} %{} %{} %{} %{}: [%{log.level}] %{*}\"\n field: \"message\"\n trim_values: \"all\"\n target_prefix: \"\"\n - drop_event:\n when:\n equals:\n log.level: \"INFO\""
} }
} }
} }

View File

@@ -1,27 +0,0 @@
title: 'Template 1'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'dns_query'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'QueryName'
Condition: 'end with'
Pattern: '.thawte.com'
---
title: 'Template 2'
id: 'This needs to be a UUIDv4 id - https://www.uuidgenerator.net/version4'
description: 'Short description detailing what this rule is filtering and why.'
references: 'Relevant urls, etc'
author: '@SecurityOnion'
date: 'MM/DD/YY'
event_type: 'process_creation'
filter_type: 'exclude'
filter:
selection_1:
TargetField: 'ParentImage'
Condition: 'is'
Pattern: 'C:\Windows\Microsoft.NET\Framework\v4.0.30319\ngentask.exe'

View File

@@ -1,3 +0,0 @@
'9EDAA51C-BB12-49D9-8748-2B61371F2E7D':
Date: '10/10/2024'
Notes: 'Example Disabled Filter - Leave this entry here, just copy and paste as needed.'

View File

@@ -2,30 +2,26 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%} {%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{% if grains.role == 'so-heavynode' %} {%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% if not AGENT_STATUS %} {% if not AGENT_STATUS %}
pull_agent_installer: {% if grains.role not in ['so-heavynode'] %}
file.managed:
- name: /opt/so/so-elastic-agent_linux_amd64
- source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- mode: 755
- makedirs: True
run_installer: run_installer:
cmd.run: cmd.script:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so - cwd: /opt/so
- retry: - args: -token={{ GRIDNODETOKENGENERAL }}
attempts: 3 - retry: True
interval: 20 {% else %}
run_installer:
cleanup_agent_installer: cmd.script:
file.absent: - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- name: /opt/so/so-elastic-agent_linux_amd64 - cwd: /opt/so
- args: -token={{ GRIDNODETOKENHEAVY }}
- retry: True
{% endif %}
{% endif %} {% endif %}

View File

@@ -1,158 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}
{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_INTEGRATION_DEFAULTS = {} %}
{# Some fleet integrations don't follow the standard naming convention #}
{% set WEIRD_INTEGRATIONS = {
'awsfirehose.logs': 'awsfirehose',
'awsfirehose.metrics': 'aws.cloudwatch',
'cribl.logs': 'cribl',
'cribl.metrics': 'cribl',
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
'azure_application_insights.app_insights': 'azure.app_insights',
'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance',
'azure_metrics.container_registry': 'azure.container_registry',
'azure_metrics.container_service': 'azure.container_service',
'azure_metrics.database_account': 'azure.database_account',
'azure_metrics.monitor': 'azure.monitor',
'azure_metrics.storage_account': 'azure.storage_account',
'azure_openai.metrics': 'azure.open_ai',
'beat.state': 'beats.stack_monitoring.state',
'beat.stats': 'beats.stack_monitoring.stats',
'enterprisesearch.health': 'enterprisesearch.stack_monitoring.health',
'enterprisesearch.stats': 'enterprisesearch.stack_monitoring.stats',
'kibana.cluster_actions': 'kibana.stack_monitoring.cluster_actions',
'kibana.cluster_rules': 'kibana.stack_monitoring.cluster_rules',
'kibana.node_actions': 'kibana.stack_monitoring.node_actions',
'kibana.node_rules': 'kibana.stack_monitoring.node_rules',
'kibana.stats': 'kibana.stack_monitoring.stats',
'kibana.status': 'kibana.stack_monitoring.status',
'logstash.node_cel': 'logstash.stack_monitoring.node',
'logstash.node_stats': 'logstash.stack_monitoring.node_stats',
'synthetics.browser': 'synthetics-browser',
'synthetics.browser_network': 'synthetics-browser.network',
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
'synthetics.http': 'synthetics-http',
'synthetics.icmp': 'synthetics-icmp',
'synthetics.tcp': 'synthetics-tcp',
'swimlane.swimlane_api': 'swimlane.api',
'swimlane.tenant_api': 'swimlane.tenant',
'swimlane.turbine_api': 'turbine.api'
} %}
{% for pkg in ADDON_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core integrations #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each integration #}
{% if pkg.es_index_patterns is defined and pkg.es_index_patterns is not none %}
{% for pattern in pkg.es_index_patterns %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{% set index_pattern = pattern.name %}
{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}
{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
{% set custom_component_name = component_name %}
{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
{% set generic_integration_type = integration_type %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}
{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
{% set component_name = "filestream.generic" %}
{% set generic_integration_type = "logs-" %}
{% endif %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}
{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}

View File

@@ -1,6 +1,6 @@
elasticfleet: elasticfleet:
enabled: enabled:
description: Enables or disables the Elastic Fleet process. This process is critical for managing Elastic Agents. description: You can enable or disable Elastic Fleet.
advanced: True advanced: True
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
enable_manager_output: enable_manager_output:
@@ -9,24 +9,6 @@ elasticfleet:
global: True global: True
forcedType: bool forcedType: bool
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
files:
soc:
elastic-defend-disabled-filters__yaml:
title: Disabled Elastic Defend filters
description: Enter the ID of the filter that should be disabled.
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
elastic-defend-custom-filters__yaml:
title: Custom Elastic Defend filters
description: Enter custom filters separated by ---
syntax: yaml
file: True
global: True
helpLink: elastic-fleet.html
advanced: True
logging: logging:
zeek: zeek:
excluded: excluded:
@@ -34,22 +16,6 @@ elasticfleet:
forcedType: "[]string" forcedType: "[]string"
helpLink: zeek.html helpLink: zeek.html
config: config:
defend_filters:
enable_auto_configuration:
description: Enable auto-configuration and management of the Elastic Defend Exclusion filters.
global: True
helpLink: elastic-fleet.html
advanced: True
subscription_integrations:
description: Enable the installation of integrations that require an Elastic license.
global: True
forcedType: bool
helpLink: elastic-fleet.html
auto_upgrade_integrations:
description: Enables or disables automatically upgrading Elastic Agent integrations.
global: True
forcedType: bool
helpLink: elastic-fleet.html
server: server:
custom_fqdn: custom_fqdn:
description: Custom FQDN for Agents to connect to. One per line. description: Custom FQDN for Agents to connect to. One per line.
@@ -113,29 +79,3 @@ elasticfleet:
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
advanced: True advanced: True
forcedType: int forcedType: int
kismet:
base_url:
description: Base URL for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
api_key:
description: API key for Kismet.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
enabled_nodes:
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"

View File

@@ -1,251 +0,0 @@
from datetime import datetime
import sys
import getopt
from so_elastic_defend_filters_helper import *
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')

# Map Sysmon-style filter fields to the ECS fields used by Elastic Defend.
TARGET_FIELD_MAPPINGS = {
    "Image": "process.executable",
    "ParentImage": "process.parent.executable",
    "CommandLine": "process.command_line",
    "ParentCommandLine": "process.parent.command_line",
    "DestinationHostname": "destination.domain",
    "QueryName": "dns.question.name",
    "DestinationIp": "destination.ip",
    "TargetObject": "registry.path",
    "TargetFilename": "file.path"
}

# Map rule event types to the Elastic Defend event dataset they apply to.
DATASET_MAPPINGS = {
    "process_create": "endpoint.events.process",
    "network_connection": "endpoint.events.network",
    "file_create": "endpoint.events.file",
    "file_delete": "endpoint.events.file",
    "registry_event": "endpoint.events.registry",
    "dns_query": "endpoint.events.network"
}

# Map rule conditions to (operator, match type) pairs for exception-list entries.
CONDITION_MAPPINGS = {
    "is": ("included", "match"),
    "end with": ("included", "wildcard"),
    "begin with": ("included", "wildcard"),
    "contains": ("included", "wildcard")
}

# Extract entries for a rule
def extract_entries(data, event_type):
    """Build the exception-list entries for one rule definition.

    Each clause in data['filter'] is translated to an ECS field/operator/value
    entry, and an event.dataset entry for the rule's event type is appended.
    Clauses with an unknown condition are logged and skipped.
    """
    entries = []
    filter_data = data.get('filter', {})
    for value in filter_data.values():
        target_field = TARGET_FIELD_MAPPINGS.get(value.get('TargetField', ''))
        condition = value.get('Condition', '')
        pattern = value.get('Pattern', '')
        if condition not in CONDITION_MAPPINGS:
            # Fix: previously this only logged and then raised KeyError on the
            # CONDITION_MAPPINGS lookup below; skip the malformed clause instead.
            logging.error(f"Invalid condition: {condition}")
            continue
        # Add the wildcards implied by "end with" / "begin with" / "contains".
        pattern = modify_pattern(condition, pattern)
        operator, match_type = CONDITION_MAPPINGS[condition]
        entries.append({
            "field": target_field,
            "operator": operator,
            "type": match_type,
            "value": pattern
        })
    # Add the event.dataset entry from DATASET_MAPPINGS
    dataset_value = DATASET_MAPPINGS.get(event_type, '')
    if dataset_value:
        entries.append({
            "field": "event.dataset",
            "operator": "included",
            "type": "match",
            "value": dataset_value
        })
    else:
        logging.error(f"No dataset mapping found for event_type: {event_type}")
    return entries
# Build the JSON
def build_json_entry(entries, guid, event_type, context):
    """Assemble one Kibana exception-list item for the given rule.

    The item id doubles as the rule GUID so later GET/PUT/DELETE calls can
    address the rule directly; the description carries a marker noting that
    the filter is managed by Security Onion.
    """
    # Fix: the original dict literal repeated the "entries" key twice; the
    # duplicate is removed (the last occurrence silently won in Python).
    return {
        "comments": [],
        "entries": entries,
        "item_id": guid,
        "name": f"SO - {event_type} - {guid}",
        "description": f"{context}\n\n <<- Note: This filter is managed by Security Onion. ->>",
        "namespace_type": "agnostic",
        "tags": ["policy:all"],
        "type": "simple",
        "os_types": ["windows"]
    }
# Check to see if the rule is disabled
# If it is, make sure it is not active
def disable_check(guid, disabled_rules, username, password):
    """Delete the rule from Kibana if its GUID is in the disabled list.

    Returns (True, state) when the rule is disabled — state is "deleted",
    "Error deleting", or "NOP" (nothing to do) — and (False, None) otherwise.
    """
    if guid in disabled_rules:
        # Fix: log message typo "that is is" -> "that it is".
        logging.info(f"Rule {guid} is in the disabled rules list, confirming that it is actually disabled...")
        existing_rule = api_request("GET", guid, username, password)
        if existing_rule:
            if api_request("DELETE", guid, username, password):
                logging.info(f"Successfully deleted rule {guid}")
                return True, "deleted"
            else:
                logging.error(f"Error deleting rule {guid}.")
                return True, "Error deleting"
        return True, "NOP"
    return False, None
def modify_pattern(condition, pattern):
    """Wrap a pattern in wildcards according to its condition.

    - 'end with': prefix the pattern with '*'.
    - 'begin with': suffix the pattern with '*'.
    - 'contains': surround the pattern with '*' on both sides.
    Lists are handled element by element; other conditions pass through unchanged.
    """
    if isinstance(pattern, list):
        # Recurse so each list element gets the same wildcard treatment.
        return [modify_pattern(condition, item) for item in pattern]
    templates = {
        "end with": "*{}",
        "begin with": "{}*",
        "contains": "*{}*",
    }
    template = templates.get(condition)
    return template.format(pattern) if template else pattern
def process_rule_update_or_create(guid, json_entry, username, password):
    """Create the rule if absent, update it if drifted; return the outcome.

    Returns one of "new", "updated", or "no_change". Drift is detected by
    comparing hashes of the relevant fields of the existing and new payloads.
    """
    current = api_request("GET", guid, username, password)
    if not current:
        logging.info(f"Creating new rule {guid}")
        # New items must declare which exception list they belong to.
        json_entry["list_id"] = "endpoint_event_filters"
        api_request("POST", guid, username, password, json_data=json_entry)
        return "new"
    current_hash = generate_hash(extract_relevant_fields(current))
    desired_hash = generate_hash(extract_relevant_fields(json_entry))
    if current_hash == desired_hash:
        logging.info(f"Rule {guid} is up to date.")
        return "no_change"
    logging.info(f"Updating rule {guid}")
    # list_id is not accepted on updates, only on creation.
    json_entry.pop("list_id", None)
    api_request("PUT", guid, username, password, json_data=json_entry)
    return "updated"
# Main function for processing rules
def process_rules(yaml_files, disabled_rules, username, password):
    """Sync a list of parsed rule documents with Kibana's exception list.

    For each rule: delete it if disabled, otherwise create or update it.
    Returns a stats dict counting rules per outcome; "rule_count" only counts
    rules that were actually processed (disabled rules are excluded from it).
    """
    stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
    for data in yaml_files:
        logging.info(f"Processing rule: {data.get('id', '')}")
        event_type = data.get('event_type', '')
        guid = data.get('id', '')
        context = data.get('description', '')
        # Disabled rules are removed from Kibana (if present) and skipped.
        rule_deleted, state = disable_check(guid, disabled_rules, username, password)
        if rule_deleted:
            stats["disabled"] += 1
            if state == "deleted":
                stats["deleted"] += 1
            continue
        # Extract entries and build JSON
        entries = extract_entries(data, event_type)
        json_entry = build_json_entry(entries, guid, event_type, context)
        # Process rule creation or update
        status = process_rule_update_or_create(guid, json_entry, username, password)
        stats[status] += 1
        stats["rule_count"] += 1
    return stats
def parse_args(argv):
    """Parse command-line options, exiting with usage help on malformed input.

    Returns the list of (option, value) pairs produced by getopt.
    """
    short_opts = "i:d:c:f:"
    long_opts = ["input=", "disabled=", "credentials=", "flags_file="]
    try:
        parsed, _positional = getopt.getopt(argv, short_opts, long_opts)
    except getopt.GetoptError:
        print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
        sys.exit(2)
    return parsed
def load_flags(file_path):
    """Return the extra CLI arguments stored one per line in file_path."""
    with open(file_path, 'r') as handle:
        return handle.read().splitlines()
def validate_inputs(credentials_file, disabled_file, yaml_directories):
    """Exit with usage help unless all required inputs were supplied."""
    if credentials_file and disabled_file and yaml_directories:
        return
    print("Usage: python so-elastic-defend-manage-filters.py -c <credentials_file> -d <disabled_file> -i <folder_of_yaml_files> [-f <flags_file>]")
    sys.exit(2)
def main(argv):
    """Entry point: parse options, load rules, and sync them with Kibana.

    Options: -c credentials file, -d disabled-rules file, -i rule directory
    (repeatable), -f flags file whose lines are appended to argv and
    re-parsed via a recursive call.
    """
    credentials_file = ""
    disabled_file = ""
    yaml_directories = []
    opts = parse_args(argv)
    for opt, arg in opts:
        if opt in ("-c", "--credentials"):
            credentials_file = arg
        elif opt in ("-d", "--disabled"):
            disabled_file = arg
        elif opt in ("-i", "--input"):
            yaml_directories.append(arg)
        elif opt in ("-f", "--flags_file"):
            # Re-run with the file's flags appended to the original argv.
            # NOTE(review): a flags file containing -f would recurse forever —
            # assumed not to happen in practice; confirm with callers.
            flags = load_flags(arg)
            return main(argv + flags)
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f"\n{timestamp}")
    validate_inputs(credentials_file, disabled_file, yaml_directories)
    credentials = load_credentials(credentials_file)
    if not credentials:
        raise Exception("Failed to load credentials")
    username, password = extract_auth_details(credentials)
    if not username or not password:
        raise Exception("Invalid credentials format")
    # Split the raw multi-document custom-rules file into one file per rule.
    custom_rules_input = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters-raw'
    custom_rules_output = '/opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters'
    prepare_custom_rules(custom_rules_input, custom_rules_output)
    disabled_rules = load_disabled(disabled_file)
    # Aggregate per-directory stats into one grid-wide summary.
    total_stats = {"rule_count": 0, "new": 0, "updated": 0, "no_change": 0, "disabled": 0, "deleted": 0}
    for yaml_dir in yaml_directories:
        yaml_files = load_yaml_files(yaml_dir)
        stats = process_rules(yaml_files, disabled_rules, username, password)
        for key in total_stats:
            total_stats[key] += stats[key]
    logging.info(f"\nProcessing Summary")
    logging.info(f" - Total processed rules: {total_stats['rule_count']}")
    logging.info(f" - New rules: {total_stats['new']}")
    logging.info(f" - Updated rules: {total_stats['updated']}")
    # Fix: this line reported the deleted count under the label "Disabled
    # rules", duplicating the label printed below with a different number.
    logging.info(f" - Deleted rules: {total_stats['deleted']}")
    logging.info(f" - Rules with no changes: {total_stats['no_change']}")
    logging.info(f"Rule status Summary")
    logging.info(f" - Active rules: {total_stats['rule_count'] - total_stats['disabled']}")
    logging.info(f" - Disabled rules: {total_stats['disabled']}")
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f"Execution completed at: {timestamp}")
if __name__ == "__main__":
    main(sys.argv[1:])

View File

@@ -23,13 +23,6 @@ fi
# Define a banner to separate sections # Define a banner to separate sections
banner="=========================================================================" banner="========================================================================="
fleet_api() {
local QUERYPATH=$1
shift
curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
elastic_fleet_integration_check() { elastic_fleet_integration_check() {
AGENT_POLICY=$1 AGENT_POLICY=$1
@@ -46,9 +39,7 @@ elastic_fleet_integration_create() {
JSON_STRING=$1 JSON_STRING=$1
if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
@@ -65,10 +56,7 @@ elastic_fleet_integration_remove() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
return 1
fi
} }
elastic_fleet_integration_update() { elastic_fleet_integration_update() {
@@ -77,9 +65,7 @@ elastic_fleet_integration_update() {
JSON_STRING=$2 JSON_STRING=$2
if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_integration_policy_upgrade() { elastic_fleet_integration_policy_upgrade() {
@@ -91,117 +77,29 @@ elastic_fleet_integration_policy_upgrade() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_package_version_check() { elastic_fleet_package_version_check() {
PACKAGE=$1 PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
if output=$(fleet_api "epm/packages/$PACKAGE"); then
echo "$output" | jq -r '.item.version'
else
echo "Error: Failed to get current package version for '$PACKAGE'"
return 1
fi
} }
elastic_fleet_package_latest_version_check() { elastic_fleet_package_latest_version_check() {
PACKAGE=$1 PACKAGE=$1
if output=$(fleet_api "epm/packages/$PACKAGE"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion'
if version=$(jq -e -r '.item.latestVersion' <<< $output); then
echo "$version"
fi
else
echo "Error: Failed to get latest version for '$PACKAGE'"
return 1
fi
} }
elastic_fleet_package_install() { elastic_fleet_package_install() {
PKG=$1 PKG=$1
VERSION=$2 VERSION=$2
if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
return 1
fi
} }
elastic_fleet_bulk_package_install() { elastic_fleet_package_is_installed() {
BULK_PKG_LIST=$1 PACKAGE=$1
if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
return 1
fi
}
elastic_fleet_installed_packages() {
if ! fleet_api "epm/packages/installed?perPage=500"; then
return 1
fi
}
elastic_fleet_agent_policy_ids() {
if output=$(fleet_api "agent_policies"); then
echo "$output" | jq -r .items[].id
else
echo "Error: Failed to retrieve agent policies."
return 1
fi
}
elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
echo "$output" | jq -r .item.package_policies[].name
else
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
return 1
fi
}
elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1
INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
else
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
return 1
fi
}
elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1
INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then
echo "$version"
fi
else
echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'"
return 1
fi
}
elastic_fleet_integration_id() {
AGENT_POLICY=$1
INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
else
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
return 1
fi
}
elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1
if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
return 1
fi
} }
elastic_fleet_policy_create() { elastic_fleet_policy_create() {
@@ -219,8 +117,15 @@ elastic_fleet_policy_create() {
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}' '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
) )
# Create Fleet Policy # Create Fleet Policy
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_policy_update() {
POLICYID=$1
JSON_STRING=$2
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}

View File

@@ -8,7 +8,6 @@
. /usr/sbin/so-elastic-fleet-common . /usr/sbin/so-elastic-fleet-common
ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy # Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do do
@@ -16,20 +15,9 @@ do
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n" printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
ERROR=true
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
ERROR=true
continue
fi
fi fi
done done
if [[ "$ERROR" == "true" ]]; then
exit 1
fi

View File

@@ -1,33 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-elastic-fleet-common
# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')
# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')
# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
printf "\nUpdating Policy: $POLICYNAME\n"
# First get the Integration ID
INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')
# Modify the default integration policy to update the policy_id and an with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then
# exit 1 on failure to update fleet integration policies, let salt handle retries
echo "Failed to update $POLICYNAME.."
exit 1
fi
done

View File

@@ -12,11 +12,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades # First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade /usr/sbin/so-elastic-fleet-package-upgrade
# Second, update Fleet Server policies # Second, configure Elastic Defend Integration seperately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration seperately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints # Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do do
@@ -24,18 +22,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
@@ -46,18 +36,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then if [[ "$RETURN_CODE" != "1" ]]; then
@@ -71,19 +53,11 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
fi fi
done done
@@ -100,19 +74,11 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION" elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
fi fi
fi fi

View File

@@ -10,6 +10,6 @@
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}') SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
# List configured package policies # List configured package policies
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages?prerelease=true" -H 'kbn-xsrf: true' | jq curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages" -H 'kbn-xsrf: true' | jq
echo echo

View File

@@ -1,128 +0,0 @@
import hashlib
import os
import json
import yaml
import requests
from requests.auth import HTTPBasicAuth
import shutil
# Extract the 'entries' and 'description' fields used for change detection
def extract_relevant_fields(filter):
    """Return only the fields that matter when comparing two filter payloads."""
    relevant = {}
    relevant['entries'] = filter.get('entries', [])
    relevant['description'] = filter.get('description', '')
    return relevant
# Sort for consistency, so that a hash can be generated
def sorted_data(value):
    """Return a canonically ordered copy of nested dicts/lists for stable hashing.

    Dicts are rebuilt with keys in sorted order (values recursed); lists are
    sorted, with dict elements ordered by their sorted (key, value) tuples.
    NOTE(review): list *elements* are not themselves recursively normalized —
    only the list order is canonicalized; confirm this matches the hashing needs.
    """
    if isinstance(value, dict):
        return {key: sorted_data(item) for key, item in sorted(value.items())}
    if isinstance(value, list):
        def element_key(element):
            return tuple(sorted(element.items())) if isinstance(element, dict) else element
        return sorted(value, key=element_key)
    return value
# Generate a hash based on sorted relevant fields
def generate_hash(data):
    """SHA-256 hex digest of the canonical JSON form of data (order-insensitive)."""
    canonical = json.dumps(sorted_data(data), sort_keys=True)
    return hashlib.sha256(canonical.encode('utf-8')).hexdigest()
# Load Elasticsearch credentials from the config file
def load_credentials(config_path):
    """Return the value of the first line starting with "user" in a curl
    config file (quotes stripped), or None when no such line exists."""
    with open(config_path, 'r') as config_file:
        for raw_line in config_file:
            if raw_line.startswith("user"):
                return raw_line.split('=', 1)[1].strip().strip('"')
    return None
# Extract username and password from credentials
def extract_auth_details(credentials):
    """Split a "user:password" string into its two parts.

    Returns a two-element list on success, or (None, None) when the string
    contains no colon.
    """
    if ':' not in credentials:
        return None, None
    return credentials.split(':', 1)
# Generalized API request function
def api_request(method, guid, username, password, json_data=None):
    """Issue an exception-list API call against the local Kibana instance.

    Returns the parsed JSON body (or True when the body is empty) on 200/201,
    None on a GET 404 (rule not found), and False on any other error.
    NOTE(review): the guid is unused for POST — new items carry their id in
    json_data; confirm against the caller.
    """
    base = "http://localhost:5601/api/exception_lists/items"
    if method == "POST":
        url = f"{base}?namespace_type=agnostic"
    else:
        url = f"{base}?item_id={guid}&namespace_type=agnostic"
    response = requests.request(
        method,
        url,
        headers={'kbn-xsrf': 'true', 'Content-Type': 'application/json'},
        auth=HTTPBasicAuth(username, password),
        json=json_data,
    )
    if response.status_code in [200, 201]:
        return response.json() if response.content else True
    if method == "GET" and response.status_code == 404:
        return None
    print(f"Error with {method} request: {response.status_code} - {response.text}")
    return False
# Load YAML data for GUIDs to skip
def load_disabled(disabled_file_path):
    """Load the YAML map of disabled rule GUIDs; a missing or empty file
    yields an empty dict."""
    if not os.path.exists(disabled_file_path):
        return {}
    with open(disabled_file_path, 'r') as disabled_file:
        return yaml.safe_load(disabled_file) or {}
def load_yaml_files(*dirs):
    """Parse every .yaml file found (recursively) under the given directories.

    Invalid directories and unparseable files are reported on stdout and
    skipped; the parsed documents of all remaining files are returned.
    """
    parsed_docs = []
    for dir_path in dirs:
        if not os.path.isdir(dir_path):
            print(f"Invalid directory: {dir_path}")
            continue
        # Walk the directory tree and pick up every .yaml file.
        for root, _subdirs, file_names in os.walk(dir_path):
            for file_name in file_names:
                if not file_name.endswith(".yaml"):
                    continue
                full_path = os.path.join(root, file_name)
                with open(full_path, 'r') as handle:
                    try:
                        parsed_docs.append(yaml.safe_load(handle))
                    except yaml.YAMLError as e:
                        print(f"Error loading {full_path}: {e}")
    return parsed_docs
def prepare_custom_rules(input_file, output_dir):
    """Split a multi-document YAML file of custom rules into one file per rule.

    The output directory is recreated from scratch so removed rules do not
    linger. Documents without an 'id' and the shipped "Template 1"/"Template 2"
    examples are skipped.
    """
    # Clear the output directory first
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir, exist_ok=True)
    try:
        # Load the YAML file
        with open(input_file, 'r') as f:
            for doc in yaml.safe_load_all(f):
                # Fix: safe_load_all yields None for empty documents (e.g. a
                # trailing '---'); previously `'id' not in doc` raised
                # TypeError and aborted the remaining rules. Skip them.
                if doc is None:
                    continue
                if 'id' not in doc:
                    print(f"Skipping rule, no 'id' found: {doc}")
                    continue
                if doc.get('title') in ["Template 1", "Template 2"]:
                    print(f"Skipping template rule with title: {doc['title']}")
                    continue
                # Create a filename using the 'id' field
                file_name = os.path.join(output_dir, f"{doc['id']}.yaml")
                # Write the individual YAML file
                with open(file_name, 'w') as output_file:
                    yaml.dump(doc, output_file, default_flow_style=False)
                print(f"Created file: {file_name}")
    except yaml.YAMLError as e:
        print(f"Error parsing YAML: {e}")
    except Exception as e:
        print(f"Error processing file: {e}")

View File

@@ -13,16 +13,13 @@
LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log" LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log"
# get the variables needed such as ELASTIC_AGENT_TARBALL_VERSION
get_elastic_agent_vars
# Check to see if we are already running # Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers") NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0 [ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0
for i in {1..30} for i in {1..30}
do do
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key') ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',') FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
done done
@@ -39,7 +36,6 @@ printf "\n### Creating a temp directory at /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace rm -rf /nsm/elastic-agent-workspace
mkdir -p /nsm/elastic-agent-workspace mkdir -p /nsm/elastic-agent-workspace
printf "\n### Extracting outer tarball and then each individual tarball/zip\n" printf "\n### Extracting outer tarball and then each individual tarball/zip\n"
tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-agent-workspace/ tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz -C /nsm/elastic-agent-workspace/
unzip -q /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/ unzip -q /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/
@@ -76,17 +72,5 @@ do
printf "\n### $GOOS/$GOARCH Installer Generated...\n" printf "\n### $GOOS/$GOARCH Installer Generated...\n"
done done
printf "\n\n### Generating MSI...\n" printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
cp /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64 /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
docker run \
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ -w /output \
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} wixl -o so-elastic-agent_windows_amd64_msi --arch x64 /workspace/so-elastic-agent.wxs
printf "\n### MSI Generated...\n"
printf "\n### Cleaning up temp files \n"
rm -rf /nsm/elastic-agent-workspace rm -rf /nsm/elastic-agent-workspace
rm -rf /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
printf "\n### Copying so_agent-installers to /nsm/elastic-fleet/ for nginx.\n"
\cp -vr /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/ /nsm/elastic-fleet/
chmod 644 /nsm/elastic-fleet/so_agent-installers/*

View File

@@ -5,7 +5,6 @@
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
{%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
# Only run on Managers # Only run on Managers
if ! is_manager_node; then if ! is_manager_node; then
@@ -14,7 +13,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true") RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=policy_id%20%3A%20so-grid-nodes_%2A&showInactive=false&showUpgradeable=true&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
@@ -28,10 +27,10 @@ OUTDATED_LIST=$(jq -r '.items | map(.id) | (tojson)' <<< "$RAW_JSON")
if [ "$OUTDATED_LIST" != '[]' ]; then if [ "$OUTDATED_LIST" != '[]' ]; then
AGENTNUMBERS=$(jq -r '.total' <<< "$RAW_JSON") AGENTNUMBERS=$(jq -r '.total' <<< "$RAW_JSON")
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n" printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic $ELASTIC_AGENT_TARBALL_VERSION...\n\n"
# Generate updated JSON payload # Generate updated JSON payload
JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }') JSON_STRING=$(jq -n --arg ELASTICVERSION $ELASTIC_AGENT_TARBALL_VERSION --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
# Update Node Agents # Update Node Agents
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"

View File

@@ -1,95 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Salt/Jinja-rendered script: for every Elastic Fleet agent policy, compare each
# integration policy's installed package version to the latest available package
# version and, after a successful dry run, upgrade it. The elastic_fleet_*
# helper functions are sourced from /usr/sbin/so-elastic-fleet-common.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
{%- set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
. /usr/sbin/so-elastic-fleet-common

# Fail fast if Kibana (which fronts the Fleet API) is unreachable.
curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
if [ $? -ne 0 ]; then
echo "Error: Failed to connect to Kibana."
exit 1
fi

# Split command-substitution results on newlines only, so policy/integration
# names containing spaces survive the for-loops below.
IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi

# Bash array of default package names, rendered at Salt render time.
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

# Tracks whether any individual upgrade failed; checked at the end.
ERROR=false
for AGENT_POLICY in $agent_policies; do
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
# this script upgrades default integration packages, exit 1 and let salt handle retrying
exit 1
fi
for INTEGRATION in $integrations; do
# Skip the Elastic Defend ("elastic-defend-endpoints") and any
# "fleet_server-*" integration policies.
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
# Get package name so we know what package to look for when checking the current and latest available version
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
exit 1
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
# auto_upgrade_integrations is disabled: restrict upgrades to default packages.
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
# Get currently installed version of package
# Both lookups hit the Fleet API, so retry up to 3 times before giving up.
attempt=0
max_attempts=3
while [ $attempt -lt $max_attempts ]; do
if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_version_check "$PACKAGE_NAME"); then
break
fi
attempt=$((attempt + 1))
done
if [ $attempt -eq $max_attempts ]; then
echo "Error: Failed getting $PACKAGE_VERSION or $AVAILABLE_VERSION"
exit 1
fi
# Get integration ID
if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then
exit 1
fi
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
# Dry run of the upgrade
echo ""
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
echo "Upgrading $INTEGRATION..."
echo "Starting dry run..."
if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
exit 1
fi
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
# If no errors with dry run, proceed with actual upgrade
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
echo "No errors detected. Proceeding with upgrade..."
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
# Record the failure but keep processing the remaining integrations.
ERROR=true
continue
fi
else
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
ERROR=true
continue
fi
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
fi
{%- endif %}
fi
done
done

# Exit non-zero if any single upgrade failed so Salt can schedule a retry.
if [[ "$ERROR" == "true" ]]; then
exit 1
fi
echo

View File

@@ -1,202 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

# Salt/Jinja-rendered script that installs and upgrades Elastic integration
# packages in bulk through the Fleet EPM API. Render-time pillar toggles:
#   elasticfleet:config:subscription_integrations -> SUB (allow non-basic-license packages)
#   elasticfleet:config:auto_upgrade_integrations -> AUTO_UPGRADE_INTEGRATIONS
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set SUB = salt['pillar.get']('elasticfleet:config:subscription_integrations', default=false) %}
{% set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common

# Check that /opt/so/state/estemplates.txt exists to signal that Elasticsearch
# has completed its first run of core-only integrations/indices/components/ilm
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
# Scratch and result files used while computing the set of packages to install.
INSTALLED_PACKAGE_LIST=/tmp/esfleet_installed_packages.json
BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
# Flipped to true once at least one package is queued for install/upgrade.
PENDING_UPDATE=false

# Integrations which are included in the package registry, but excluded from automatic installation via this script.
# Requiring some level of manual Elastic Stack configuration before installation
EXCLUDED_INTEGRATIONS=('apm')
# Convert a dotted version string (e.g. "8.14.2") into a fixed-width numeric
# string ("8014002") so versions can be compared as integers. Each of the
# minor/patch components is zero-padded to three digits; awk's %d conversion
# treats missing or non-numeric components as 0.
version_conversion(){
    version=$1
    echo "$version" | awk -F '.' '{ printf("%d%03d%03d\n", $1, $2, $3); }'
}

# Compare two dotted version strings and print exactly one word on stdout:
# "less", "greater", or "equal" — describing version1 relative to version2.
compare_versions() {
    version1=$1
    version2=$2

    # Convert versions to numbers
    num1=$(version_conversion "$version1")
    num2=$(version_conversion "$version2")

    # Compare with bash built-in integer arithmetic instead of shelling out
    # to bc. The 10# base prefix is required: version_conversion zero-pads
    # (e.g. "0.9.1" -> "0009001"), and without it (( )) would parse a value
    # with a leading zero as octal and fail on digits 8/9.
    if (( 10#$num1 < 10#$num2 )); then
        echo "less"
    elif (( 10#$num1 > 10#$num2 )); then
        echo "greater"
    else
        echo "equal"
    fi
}
# Split command-substitution results on newlines only, so policy/integration
# names containing spaces survive the for-loops below.
IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi

# Default package names rendered from pillar/defaults at Salt render time.
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

# Collect names of non-default packages referenced by any agent policy; these
# are treated as "in use" when deciding whether to auto-upgrade them later.
in_use_integrations=()
for AGENT_POLICY in $agent_policies; do
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
# skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
echo "Skipping $AGENT_POLICY.. "
continue
fi
for INTEGRATION in $integrations; do
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
echo "Not adding $INTEGRATION, couldn't get package name"
continue
fi
# non-default integrations that are in-use in any policy
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
in_use_integrations+=("$PACKAGE_NAME")
fi
done
done
# Main flow: only proceed once the Elasticsearch template state file exists and
# the Fleet EPM API answers; otherwise exit 0 and let the next highstate retry.
if [[ -f $STATE_FILE_SUCCESS ]]; then
if retry 3 1 "curl -s -K /opt/so/conf/elasticsearch/curl.config --output /dev/null --silent --head --fail localhost:5601/api/fleet/epm/packages"; then
# Package_list contains all integrations beta / non-beta.
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
# Walk every registry package (one JSON object per line, fed by the heredoc
# at the bottom of this loop) and queue installs/upgrades.
while read -r package; do
# get package details
package_name=$(echo "$package" | jq -r '.name')
latest_version=$(echo "$package" | jq -r '.latest_version')
installed_version=$(echo "$package" | jq -r '.installed_version')
subscription=$(echo "$package" | jq -r '.subscription')
bulk_package=$(echo "$package" | jq '{name: .name, version: .latest_version}' )
# Skip packages matching the exclusion list (substring match on the array).
if [[ ! "${EXCLUDED_INTEGRATIONS[@]}" =~ "$package_name" ]]; then
{% if not SUB %}
if [[ "$subscription" != "basic" && "$subscription" != "null" && -n "$subscription" ]]; then
# pass over integrations that require non-basic elastic license
echo "$package_name integration requires an Elastic license of $subscription or greater... skipping"
continue
else
if [[ "$installed_version" == "null" || -z "$installed_version" ]]; then
echo "$package_name is not installed... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
else
# compare_versions prints a single word: less/greater/equal.
results=$(compare_versions "$latest_version" "$installed_version")
if [ $results == "greater" ]; then
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
{%- endif %}
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
else
echo "skipping available upgrade for in use integration - $package_name."
fi
{%- endif %}
fi
fi
fi
{% else %}
# SUB is true: subscription-gated packages are permitted, so the same
# install/upgrade logic runs without the license filter above.
if [[ "$installed_version" == "null" || -z "$installed_version" ]]; then
echo "$package_name is not installed... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
else
results=$(compare_versions "$latest_version" "$installed_version")
if [ $results == "greater" ]; then
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
{%- endif %}
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
PENDING_UPDATE=true
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
else
echo "skipping available upgrade for in use integration - $package_name."
fi
{%- endif %}
fi
fi
{% endif %}
else
echo "Skipping $package_name..."
fi
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
if [ "$PENDING_UPDATE" = true ]; then
# Run chunked install of packages
# Split the queued packages into groups of 25 per bulk-install request.
echo "" > $BULK_INSTALL_OUTPUT
pkg_group=1
pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"
jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
pkg_group=$((pkg_group + 1))
done
for file in "${pkg_filename}_"*.json; do
[ -e "$file" ] || continue
if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then
# integrations loaded by this script are non-essential and shouldn't cause exit, skip them for now next highstate run can retry
echo "Failed to complete a chunk of bulk package installs -- $file "
continue
fi
done
# cleanup any temp files for chunked package install
rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
else
echo "Elastic integrations don't appear to need installation/updating..."
fi
# Write out file for generating index/component/ilm templates
if latest_installed_package_list=$(elastic_fleet_installed_packages); then
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
fi
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
# Refresh installed component template list
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
echo $latest_component_templates_list > $COMPONENT_TEMPLATES
fi
else
# This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
echo "Elastic Fleet does not appear to be responding... Exiting... "
exit 0
fi
else
# This message will appear when an update to core integration is made and this script is run at the same time as
# elasticsearch.enabled -> detects change to core index settings -> deletes estemplates.txt
echo "Elasticsearch may not be fully configured yet or is currently updating core index settings."
exit 0
fi

View File

@@ -15,78 +15,13 @@ if ! is_manager_node; then
fi fi
function update_logstash_outputs() { function update_logstash_outputs() {
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then # Generate updated JSON payload
SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl') JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SECRETS "$SECRETS" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
fi
fi
# Update Logstash Outputs # Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
} }
function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
# Update policy when fleet has secrets enabled
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
--argjson SECRETS "$SECRETS" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else
# Update policy when fleet has secrets disabled or policy hasn't been force updated
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
fi
# Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
else
printf "Failed to get current Kafka output policy..."
exit 1
fi
}
{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_kafka" ]; then
printf "Failed to query for current Kafka Outputs..."
exit 1
fi
# Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=()
# Query for the current Grid Nodes that are running kafka
KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
# Query for Kafka nodes with Broker role and add hostname to list
while IFS= read -r line; do
NEW_LIST+=("$line")
done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs # Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
@@ -144,8 +79,6 @@ function update_kafka_outputs() {
done done
fi fi
{% endif %}
# Sort & hash the new list of Logstash Outputs # Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
@@ -154,28 +87,9 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n" printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
# Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
printf "Checking if the correct output policy is set as default\n"
OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
printf "Default output policy needs to be updated.\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
else
printf "Default output policy is set - no update needed.\n"
fi
exit 0 exit 0
else else
printf "\nHashes don't match - update needed.\n" printf "\nHashes don't match - update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs update_logstash_outputs
{%- endif %}
fi fi

View File

@@ -10,16 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..." echo "Setting up {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# packages loaded by this script should never fail to install and REQUIRED before an installation of SO can be considered successful
echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
exit 1
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo

View File

@@ -10,15 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..." echo "Upgrading {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# exit 1 on failure to upgrade a default package, allow salt to handle retries
echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo

Some files were not shown because too many files have changed in this diff Show More