Compare commits

..

21 Commits

Author SHA1 Message Date
Mike Reeves
9ddd01748c Merge pull request #15598 from Security-Onion-Solutions/2.4/main
Merge patch into dev
2026-03-13 10:48:54 -04:00
Mike Reeves
89e470059e Merge pull request #15597 from Security-Onion-Solutions/2.4.211
2.4.211
2026-03-12 13:18:19 -04:00
Mike Reeves
79b30e43d9 2.4.211 2026-03-12 11:33:11 -04:00
Mike Reeves
5cebce32f7 2.4.211 2026-03-12 11:31:59 -04:00
Josh Patterson
810681c92e Merge pull request #15593 from Security-Onion-Solutions/ulimit
set container ulimits to default
2026-03-11 14:40:40 -04:00
Josh Patterson
51f9104d0f set container ulimits to default 2026-03-11 14:37:43 -04:00
Mike Reeves
8da5ed673b Merge pull request #15586 from Security-Onion-Solutions/TOoSmOotH-patch-4
Add support for version 2.4.211 in soup script
2026-03-11 12:16:49 -04:00
Josh Patterson
83ba40b548 Merge pull request #15588 from Security-Onion-Solutions/m0duspwnens-patch-1
clear HOTFIX file
2026-03-11 12:16:21 -04:00
Josh Patterson
7de8528b34 clear HOTFIX file 2026-03-11 12:14:48 -04:00
Mike Reeves
e6bd57e08d Fix conditional check for POSTVERSION 2.4.211 2026-03-11 12:13:05 -04:00
Mike Reeves
06664440ad Add support for version 2.4.211 in soup script 2026-03-11 12:10:28 -04:00
Josh Patterson
bd31f2898b Merge pull request #15584 from Security-Onion-Solutions/hypefix
remove 10T virtual disk limit. URL_BASE to vm hosts file
2026-03-11 11:58:46 -04:00
Josh Patterson
5bf9d92b52 add URL_BASE to vm hosts file 2026-03-11 11:55:42 -04:00
Josh Patterson
48c369ed11 remove 10T limit for virtual disk 2026-03-11 11:55:01 -04:00
Josh Patterson
7fec2d59a7 Merge pull request #15583 from Security-Onion-Solutions/m0duspwnens-patch-1
fix enable/disable suricata pcap
2026-03-11 11:52:53 -04:00
Mike Reeves
a0ad589c3a Merge pull request #15582 from Security-Onion-Solutions/TOoSmOotH-patch-3
Bump version from 2.4.210 to 2.4.211
2026-03-11 11:48:51 -04:00
Mike Reeves
0bd54e2835 Add version 2.4.211 to discussion template 2026-03-11 11:44:57 -04:00
Mike Reeves
58f5c56b72 Bump version from 2.4.210 to 2.4.211 2026-03-11 11:43:42 -04:00
Josh Patterson
6472c610d0 fix enable/disable suricata pcap
suricata pcap can now be enabled/disabled through pcap:enabled grid config / pillar
2026-03-10 11:01:11 -04:00
Mike Reeves
179c1ea7f7 Merge pull request #15570 from Security-Onion-Solutions/TOoSmOotH-patch-1
Add date to HOTFIX file
2026-03-10 10:20:16 -04:00
Mike Reeves
db964cad21 Add date to HOTFIX file 2026-03-10 10:18:25 -04:00
228 changed files with 3577 additions and 1228 deletions

View File

@@ -2,11 +2,13 @@ body:
- type: markdown - type: markdown
attributes: attributes:
value: | value: |
⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
- type: dropdown - type: dropdown
attributes: attributes:
label: Version label: Version
description: Which version of Security Onion are you asking about? description: Which version of Security Onion 2.4.x are you asking about?
options: options:
- -
- 2.4.10 - 2.4.10

View File

@@ -1,177 +0,0 @@
body:
- type: markdown
attributes:
value: |
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
- type: dropdown
attributes:
label: Version
description: Which version of Security Onion are you asking about?
options:
-
- 3.0.0
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Method
description: How did you install Security Onion?
options:
-
- Security Onion ISO image
- Cloud image (Amazon, Azure, Google)
- Network installation on Oracle 9 (unsupported)
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Description
description: >
Is this discussion about installation, configuration, upgrading, or other?
options:
-
- installation
- configuration
- upgrading
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Type
description: >
When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
options:
-
- Import
- Eval
- Standalone
- Distributed
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Location
description: >
Is this deployment in the cloud, on-prem with Internet access, or airgap?
options:
-
- cloud
- on-prem with Internet access
- airgap
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Hardware Specs
description: >
Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://securityonion.net/docs/hardware?
options:
-
- Meets minimum requirements
- Exceeds minimum requirements
- Does not meet minimum requirements
- other (please provide detail below)
validations:
required: true
- type: input
attributes:
label: CPU
description: How many CPU cores do you have?
validations:
required: true
- type: input
attributes:
label: RAM
description: How much RAM do you have?
validations:
required: true
- type: input
attributes:
label: Storage for /
description: How much storage do you have for the / partition?
validations:
required: true
- type: input
attributes:
label: Storage for /nsm
description: How much storage do you have for the /nsm partition?
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Collection
description: >
Are you collecting network traffic from a tap or span port?
options:
-
- tap
- span port
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Speeds
description: >
How much network traffic are you monitoring?
options:
-
- Less than 1Gbps
- 1Gbps to 10Gbps
- more than 10Gbps
validations:
required: true
- type: dropdown
attributes:
label: Status
description: >
Does SOC Grid show all services on all nodes as running OK?
options:
-
- Yes, all services on all nodes are running OK
- No, one or more services are failed (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Salt Status
description: >
Do you get any failures when you run "sudo salt-call state.highstate"?
options:
-
- Yes, there are salt failures (please provide detail below)
- No, there are no failures
validations:
required: true
- type: dropdown
attributes:
label: Logs
description: >
Are there any additional clues in /opt/so/log/?
options:
-
- Yes, there are additional clues in /opt/so/log/ (please provide detail below)
- No, there are no additional clues
validations:
required: true
- type: textarea
attributes:
label: Detail
description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
placeholder: |-
STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
validations:
required: true
- type: checkboxes
attributes:
label: Guidelines
options:
- label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
required: true

View File

@@ -13,7 +13,7 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
python-version: ["3.14"] python-version: ["3.13"]
python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"] python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
steps: steps:

View File

@@ -1,17 +1,17 @@
### 2.4.210-20260302 ISO image released on 2026/03/02 ### 2.4.211-20260312 ISO image released on 2026/03/12
### Download and Verify ### Download and Verify
2.4.210-20260302 ISO image: 2.4.211-20260312 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.211-20260312.iso
MD5: 575F316981891EBED2EE4E1F42A1F016 MD5: 7082210AE9FF4D2634D71EAD4DC8F7A3
SHA1: 600945E8823221CBC5F1C056084A71355308227E SHA1: F76E08C47FD786624B2385B4235A3D61A4C3E9DC
SHA256: A6AA6471125F07FA6E2796430E94BEAFDEF728E833E9728FDFA7106351EBC47E SHA256: CE6E61788DFC492E4897EEDC139D698B2EDBEB6B631DE0043F66E94AF8A0FF4E
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.211-20260312.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.211-20260312.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.211-20260312.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.210-20260302.iso.sig securityonion-2.4.210-20260302.iso gpg --verify securityonion-2.4.211-20260312.iso.sig securityonion-2.4.211-20260312.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Mon 02 Mar 2026 11:55:24 AM EST using RSA key ID FE507013 gpg: Signature made Wed 11 Mar 2026 03:05:09 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.

1
HOTFIX
View File

@@ -0,0 +1 @@

View File

@@ -1,58 +1,50 @@
<p align="center"> ## Security Onion 2.4
<img src="https://securityonionsolutions.com/logo/logo-so-onion-dark.svg" width="400" alt="Security Onion Logo">
</p>
# Security Onion Security Onion 2.4 is here!
Security Onion is a free and open Linux distribution for threat hunting, enterprise security monitoring, and log management. It includes a comprehensive suite of tools designed to work together to provide visibility into your network and host activity. ## Screenshots
## ✨ Features Alerts
![Alerts](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/50_alerts.png)
Security Onion includes everything you need to monitor your network and host systems: Dashboards
![Dashboards](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/53_dashboards.png)
* **Security Onion Console (SOC)**: A unified web interface for analyzing security events and managing your grid. Hunt
* **Elastic Stack**: Powerful search backed by Elasticsearch. ![Hunt](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/56_hunt.png)
* **Intrusion Detection**: Network-based IDS with Suricata and host-based monitoring with Elastic Fleet.
* **Network Metadata**: Detailed network metadata generated by Zeek or Suricata.
* **Full Packet Capture**: Retain and analyze raw network traffic with Suricata PCAP.
## ⭐ Security Onion Pro Detections
![Detections](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/57_detections.png)
For organizations and enterprises requiring advanced capabilities, **Security Onion Pro** offers additional features designed for scale and efficiency: PCAP
![PCAP](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/62_pcap.png)
* **Onion AI**: Leverage powerful AI-driven insights to accelerate your analysis and investigations. Grid
* **Enterprise Features**: Enhanced tools and integrations tailored for enterprise-grade security operations. ![Grid](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/75_grid.png)
For more information, visit the [Security Onion Pro](https://securityonionsolutions.com/pro) page. Config
![Config](https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion-docs/2.4/images/87_config.png)
## ☁️ Cloud Deployment ### Release Notes
Security Onion is available and ready to deploy in the **AWS**, **Azure**, and **Google Cloud (GCP)** marketplaces. https://securityonion.net/docs/release-notes
## 🚀 Getting Started ### Requirements
| Goal | Resource | https://securityonion.net/docs/hardware
| :--- | :--- |
| **Download** | [Security Onion ISO](https://securityonion.net/docs/download) |
| **Requirements** | [Hardware Guide](https://securityonion.net/docs/hardware) |
| **Install** | [Installation Instructions](https://securityonion.net/docs/installation) |
| **What's New** | [Release Notes](https://securityonion.net/docs/release-notes) |
## 📖 Documentation & Support ### Download
For more detailed information, please visit our [Documentation](https://docs.securityonion.net). https://securityonion.net/docs/download
* **FAQ**: [Frequently Asked Questions](https://securityonion.net/docs/faq) ### Installation
* **Community**: [Discussions & Support](https://securityonion.net/docs/community-support)
* **Training**: [Official Training](https://securityonion.net/training)
## 🤝 Contributing https://securityonion.net/docs/installation
We welcome contributions! Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on how to get involved. ### FAQ
## 🛡️ License https://securityonion.net/docs/faq
Security Onion is licensed under the terms of the license found in the [LICENSE](LICENSE) file. ### Feedback
--- https://securityonion.net/docs/community-support
*Built with 🧅 by Security Onion Solutions.*

View File

@@ -4,7 +4,6 @@
| Version | Supported | | Version | Supported |
| ------- | ------------------ | | ------- | ------------------ |
| 3.x | :white_check_mark: |
| 2.4.x | :white_check_mark: | | 2.4.x | :white_check_mark: |
| 2.3.x | :x: | | 2.3.x | :x: |
| 16.04.x | :x: | | 16.04.x | :x: |

View File

@@ -1 +1 @@
3.0.0 2.4.211

View File

@@ -87,6 +87,8 @@ base:
- zeek.adv_zeek - zeek.adv_zeek
- bpf.soc_bpf - bpf.soc_bpf
- bpf.adv_bpf - bpf.adv_bpf
- pcap.soc_pcap
- pcap.adv_pcap
- suricata.soc_suricata - suricata.soc_suricata
- suricata.adv_suricata - suricata.adv_suricata
- minions.{{ grains.id }} - minions.{{ grains.id }}
@@ -132,6 +134,8 @@ base:
- zeek.adv_zeek - zeek.adv_zeek
- bpf.soc_bpf - bpf.soc_bpf
- bpf.adv_bpf - bpf.adv_bpf
- pcap.soc_pcap
- pcap.adv_pcap
- suricata.soc_suricata - suricata.soc_suricata
- suricata.adv_suricata - suricata.adv_suricata
- minions.{{ grains.id }} - minions.{{ grains.id }}
@@ -181,6 +185,8 @@ base:
- zeek.adv_zeek - zeek.adv_zeek
- bpf.soc_bpf - bpf.soc_bpf
- bpf.adv_bpf - bpf.adv_bpf
- pcap.soc_pcap
- pcap.adv_pcap
- suricata.soc_suricata - suricata.soc_suricata
- suricata.adv_suricata - suricata.adv_suricata
- minions.{{ grains.id }} - minions.{{ grains.id }}
@@ -203,6 +209,8 @@ base:
- zeek.adv_zeek - zeek.adv_zeek
- bpf.soc_bpf - bpf.soc_bpf
- bpf.adv_bpf - bpf.adv_bpf
- pcap.soc_pcap
- pcap.adv_pcap
- suricata.soc_suricata - suricata.soc_suricata
- suricata.adv_suricata - suricata.adv_suricata
- strelka.soc_strelka - strelka.soc_strelka
@@ -289,6 +297,8 @@ base:
- zeek.adv_zeek - zeek.adv_zeek
- bpf.soc_bpf - bpf.soc_bpf
- bpf.adv_bpf - bpf.adv_bpf
- pcap.soc_pcap
- pcap.adv_pcap
- suricata.soc_suricata - suricata.soc_suricata
- suricata.adv_suricata - suricata.adv_suricata
- strelka.soc_strelka - strelka.soc_strelka

View File

@@ -1,14 +1,24 @@
from os import path
import subprocess import subprocess
def check(): def check():
osfam = __grains__['os_family']
retval = 'False' retval = 'False'
cmd = 'needs-restarting -r > /dev/null 2>&1' if osfam == 'Debian':
if path.exists('/var/run/reboot-required'):
retval = 'True'
try: elif osfam == 'RedHat':
needs_restarting = subprocess.check_call(cmd, shell=True) cmd = 'needs-restarting -r > /dev/null 2>&1'
except subprocess.CalledProcessError:
retval = 'True' try:
needs_restarting = subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError:
retval = 'True'
else:
retval = 'Unsupported OS: %s' % osfam
return retval return retval

View File

@@ -38,6 +38,7 @@
] %} ] %}
{% set sensor_states = [ {% set sensor_states = [
'pcap',
'suricata', 'suricata',
'healthcheck', 'healthcheck',
'tcpreplay', 'tcpreplay',

View File

@@ -1,15 +1,21 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set PCAP_BPF_STATUS = 0 %} {% set PCAP_BPF_STATUS = 0 %}
{% set STENO_BPF_COMPILED = "" %}
{% if GLOBALS.pcap_engine == "TRANSITION" %}
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
{% else %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %} {% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }} {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %} {% set PCAPBPF = BPFMERGED.pcap %}
{% endif %}
{% if PCAPBPF %} {% if PCAPBPF %}
{% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %} {% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% if PCAP_BPF_CALC['retcode'] == 0 %} {% if PCAP_BPF_CALC['retcode'] == 0 %}
{% set PCAP_BPF_STATUS = 1 %} {% set PCAP_BPF_STATUS = 1 %}
{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
{% endif %} {% endif %}
{% endif %} {% endif %}

View File

@@ -3,6 +3,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
- docker - docker
@@ -16,3 +18,9 @@ trusttheca:
- show_changes: False - show_changes: False
- makedirs: True - makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}

View File

@@ -0,0 +1,19 @@
{
"registry-mirrors": [
"https://:5000"
],
"bip": "172.17.0.1/24",
"default-address-pools": [
{
"base": "172.17.0.0/24",
"size": 24
}
],
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Soft": 1048576,
"Hard": 1048576
}
}
}

View File

@@ -20,6 +20,11 @@ kernel.printk:
sysctl.present: sysctl.present:
- value: "3 4 1 3" - value: "3 4 1 3"
# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
file.absent:
- name: /tmp/variables.txt
# Add socore Group # Add socore Group
socoregroup: socoregroup:
group.present: group.present:
@@ -144,6 +149,28 @@ common_sbin_jinja:
- so-import-pcap - so-import-pcap
{% endif %} {% endif %}
{% if GLOBALS.role == 'so-heavynode' %}
remove_so-pcap-import_heavynode:
file.absent:
- name: /usr/sbin/so-pcap-import
remove_so-import-pcap_heavynode:
file.absent:
- name: /usr/sbin/so-import-pcap
{% endif %}
{% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
# these two states remove the scripts from non manager nodes
remove_soup:
file.absent:
- name: /usr/sbin/soup
remove_so-firewall:
file.absent:
- name: /usr/sbin/so-firewall
{% endif %}
so-status_script: so-status_script:
file.managed: file.managed:
- name: /usr/sbin/so-status - name: /usr/sbin/so-status

View File

@@ -1,5 +1,52 @@
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init # we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined # since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
{% if grains.os_family == 'Debian' %}
commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- apache2-utils
- wget
- ntpdate
- jq
- curl
- ca-certificates
- software-properties-common
- apt-transport-https
- openssl
- netcat-openbsd
- sqlite3
- libssl-dev
- procps
- python3-dateutil
- python3-docker
- python3-packaging
- python3-lxml
- git
- rsync
- vim
- tar
- unzip
- bc
{% if grains.oscodename != 'focal' %}
- python3-rich
{% endif %}
{% if grains.oscodename == 'focal' %}
# since Ubuntu requires an internet connection we can use pip to install modules
python3-pip:
pkg.installed
python-rich:
pip.installed:
- name: rich
- target: /usr/local/lib/python3.8/dist-packages/
- require:
- pkg: python3-pip
{% endif %}
{% endif %}
{% if grains.os_family == 'RedHat' %}
remove_mariadb: remove_mariadb:
pkg.removed: pkg.removed:
@@ -37,3 +84,5 @@ commonpkgs:
- unzip - unzip
- wget - wget
- yum-utils - yum-utils
{% endif %}

View File

@@ -3,6 +3,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %} {% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %} {% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
@@ -11,6 +13,14 @@
{% endif %} {% endif %}
{% set SOVERSION = salt['file.read']('/etc/soversion').strip() %} {% set SOVERSION = salt['file.read']('/etc/soversion').strip() %}
remove_common_soup:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
remove_common_so-firewall:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
# This section is used to put the scripts in place in the Salt file system # This section is used to put the scripts in place in the Salt file system
# in case a state run tries to overwrite what we do in the next section. # in case a state run tries to overwrite what we do in the next section.
copy_so-common_common_tools_sbin: copy_so-common_common_tools_sbin:
@@ -110,3 +120,23 @@ copy_bootstrap-salt_sbin:
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh - source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
- force: True - force: True
- preserve: True - preserve: True
{# this is added in 2.4.120 to remove salt repo files pointing to saltproject.io to accomodate the move to broadcom and new bootstrap-salt script #}
{% if salt['pkg.version_cmp'](SOVERSION, '2.4.120') == -1 %}
{% set saltrepofile = '/etc/yum.repos.d/salt.repo' %}
{% if grains.os_family == 'Debian' %}
{% set saltrepofile = '/etc/apt/sources.list.d/salt.list' %}
{% endif %}
remove_saltproject_io_repo_manager:
file.absent:
- name: {{ saltrepofile }}
{% endif %}
{% else %}
fix_23_soup_sbin:
cmd.run:
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
cmd.run:
- name: curl -s -f -o /opt/so/saltstack/default/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}

View File

@@ -16,7 +16,7 @@
if [ "$#" -lt 2 ]; then if [ "$#" -lt 2 ]; then
cat 1>&2 <<EOF cat 1>&2 <<EOF
$0 compiles a BPF expression to be passed to PCAP to apply a socket filter. $0 compiles a BPF expression to be passed to stenotype to apply a socket filter.
Its first argument is the interface (link type is required) and all other arguments Its first argument is the interface (link type is required) and all other arguments
are passed to TCPDump. are passed to TCPDump.

View File

@@ -333,8 +333,8 @@ get_elastic_agent_vars() {
if [ -f "$defaultsfile" ]; then if [ -f "$defaultsfile" ]; then
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]') ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/3/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/3/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
@@ -349,16 +349,21 @@ get_random_value() {
} }
gpg_rpm_import() { gpg_rpm_import() {
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then if [[ $is_oracle ]]; then
local RPMKEYSLOC="../salt/repo/client/files/$OS/keys" if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
else local RPMKEYSLOC="../salt/repo/client/files/$OS/keys"
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/$OS/keys" else
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/$OS/keys"
fi
RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
for RPMKEY in "${RPMKEYS[@]}"; do
rpm --import $RPMKEYSLOC/$RPMKEY
echo "Imported $RPMKEY"
done
elif [[ $is_rpm ]]; then
echo "Importing the security onion GPG key"
rpm --import ../salt/repo/client/files/oracle/keys/securityonion.pub
fi fi
RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
for RPMKEY in "${RPMKEYS[@]}"; do
rpm --import $RPMKEYSLOC/$RPMKEY
echo "Imported $RPMKEY"
done
} }
header() { header() {
@@ -610,19 +615,69 @@ salt_minion_count() {
} }
set_os() { set_os() {
if [ -f /etc/redhat-release ] && grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release && [ -f /etc/oracle-release ]; then if [ -f /etc/redhat-release ]; then
OS=oracle if grep -q "Rocky Linux release 9" /etc/redhat-release; then
OSVER=9 OS=rocky
is_oracle=true OSVER=9
is_rpm=true is_rocky=true
is_rpm=true
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
OS=centos
OSVER=9
is_centos=true
is_rpm=true
elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
OS=alma
OSVER=9
is_alma=true
is_rpm=true
elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
if [ -f /etc/oracle-release ]; then
OS=oracle
OSVER=9
is_oracle=true
is_rpm=true
else
OS=rhel
OSVER=9
is_rhel=true
is_rpm=true
fi
fi
cron_service_name="crond"
elif [ -f /etc/os-release ]; then
if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
UBVER=20.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
OSVER=jammy
UBVER=22.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
OSVER=bookworm
DEBVER=12
is_debian=true
OS=debian
is_deb=true
fi
cron_service_name="cron"
fi fi
cron_service_name="crond"
} }
set_minionid() { set_minionid() {
MINIONID=$(lookup_grain id) MINIONID=$(lookup_grain id)
} }
set_palette() {
if [[ $is_deb ]]; then
update-alternatives --set newt-palette /etc/newt/palette.original
fi
}
set_version() { set_version() {
CURRENTVERSION=0.0.0 CURRENTVERSION=0.0.0

View File

@@ -32,6 +32,7 @@ container_list() {
"so-nginx" "so-nginx"
"so-pcaptools" "so-pcaptools"
"so-soc" "so-soc"
"so-steno"
"so-suricata" "so-suricata"
"so-telegraf" "so-telegraf"
"so-zeek" "so-zeek"
@@ -57,6 +58,7 @@ container_list() {
"so-pcaptools" "so-pcaptools"
"so-redis" "so-redis"
"so-soc" "so-soc"
"so-steno"
"so-strelka-backend" "so-strelka-backend"
"so-strelka-manager" "so-strelka-manager"
"so-suricata" "so-suricata"
@@ -69,6 +71,7 @@ container_list() {
"so-logstash" "so-logstash"
"so-nginx" "so-nginx"
"so-redis" "so-redis"
"so-steno"
"so-suricata" "so-suricata"
"so-soc" "so-soc"
"so-telegraf" "so-telegraf"

View File

@@ -179,6 +179,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error

View File

@@ -55,22 +55,19 @@ if [ $SKIP -ne 1 ]; then
fi fi
delete_pcap() { delete_pcap() {
PCAP_DATA="/nsm/suripcap/" PCAP_DATA="/nsm/pcap/"
[ -d $PCAP_DATA ] && rm -rf $PCAP_DATA/* [ -d $PCAP_DATA ] && so-pcap-stop && rm -rf $PCAP_DATA/* && so-pcap-start
} }
delete_suricata() { delete_suricata() {
SURI_LOG="/nsm/suricata/" SURI_LOG="/nsm/suricata/"
[ -d $SURI_LOG ] && rm -rf $SURI_LOG/* [ -d $SURI_LOG ] && so-suricata-stop && rm -rf $SURI_LOG/* && so-suricata-start
} }
delete_zeek() { delete_zeek() {
ZEEK_LOG="/nsm/zeek/logs/" ZEEK_LOG="/nsm/zeek/logs/"
[ -d $ZEEK_LOG ] && so-zeek-stop && rm -rf $ZEEK_LOG/* && so-zeek-start [ -d $ZEEK_LOG ] && so-zeek-stop && rm -rf $ZEEK_LOG/* && so-zeek-start
} }
so-suricata-stop
delete_pcap delete_pcap
delete_suricata delete_suricata
delete_zeek delete_zeek
so-suricata-start

View File

@@ -23,6 +23,7 @@ if [ $# -ge 1 ]; then
fi fi
case $1 in case $1 in
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
"elastic-fleet") docker stop so-elastic-fleet && docker rm so-elastic-fleet && salt-call state.apply elasticfleet queue=True;; "elastic-fleet") docker stop so-elastic-fleet && docker rm so-elastic-fleet && salt-call state.apply elasticfleet queue=True;;
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;; *) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
esac esac

View File

@@ -72,7 +72,7 @@ clean() {
done done
fi fi
## Clean up extracted pcaps ## Clean up extracted pcaps from Steno
PCAPS='/nsm/pcapout' PCAPS='/nsm/pcapout'
OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1) OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]; then if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]; then

View File

@@ -23,6 +23,7 @@ if [ $# -ge 1 ]; then
case $1 in case $1 in
"all") salt-call state.highstate queue=True;; "all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
"elastic-fleet") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply elasticfleet queue=True; fi ;; "elastic-fleet") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply elasticfleet queue=True; fi ;;
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;; *) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac esac

34
salt/curator/disabled.sls Normal file
View File

@@ -0,0 +1,34 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
so-curator:
docker_container.absent:
- force: True
so-curator_so-status.disabled:
file.line:
- name: /opt/so/conf/so-status/so-status.conf
- match: ^so-curator$
- mode: delete
so-curator-cluster-close:
cron.absent:
- identifier: so-curator-cluster-close
so-curator-cluster-delete:
cron.absent:
- identifier: so-curator-cluster-delete
delete_curator_configuration:
file.absent:
- name: /opt/so/conf/curator
- recurse: True
{% set files = salt.file.find(path='/usr/sbin', name='so-curator*') %}
{% if files|length > 0 %}
delete_curator_scripts:
file.absent:
- names: {{files|yaml}}
{% endif %}

View File

@@ -1,10 +1,6 @@
docker: docker:
range: '172.17.1.0/24' range: '172.17.1.0/24'
gateway: '172.17.1.1' gateway: '172.17.1.1'
ulimits:
- name: nofile
soft: 1048576
hard: 1048576
containers: containers:
'so-dockerregistry': 'so-dockerregistry':
final_octet: 20 final_octet: 20
@@ -13,7 +9,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-elastic-fleet': 'so-elastic-fleet':
final_octet: 21 final_octet: 21
port_bindings: port_bindings:
@@ -21,7 +16,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-elasticsearch': 'so-elasticsearch':
final_octet: 22 final_octet: 22
port_bindings: port_bindings:
@@ -30,16 +24,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits:
- name: memlock
soft: -1
hard: -1
- name: nofile
soft: 65536
hard: 65536
- name: nproc
soft: 4096
hard: 4096
'so-influxdb': 'so-influxdb':
final_octet: 26 final_octet: 26
port_bindings: port_bindings:
@@ -47,7 +31,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-kibana': 'so-kibana':
final_octet: 27 final_octet: 27
port_bindings: port_bindings:
@@ -55,7 +38,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-kratos': 'so-kratos':
final_octet: 28 final_octet: 28
port_bindings: port_bindings:
@@ -64,7 +46,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-hydra': 'so-hydra':
final_octet: 30 final_octet: 30
port_bindings: port_bindings:
@@ -73,7 +54,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-logstash': 'so-logstash':
final_octet: 29 final_octet: 29
port_bindings: port_bindings:
@@ -90,7 +70,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-nginx': 'so-nginx':
final_octet: 31 final_octet: 31
port_bindings: port_bindings:
@@ -102,7 +81,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-nginx-fleet-node': 'so-nginx-fleet-node':
final_octet: 31 final_octet: 31
port_bindings: port_bindings:
@@ -110,7 +88,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-redis': 'so-redis':
final_octet: 33 final_octet: 33
port_bindings: port_bindings:
@@ -119,13 +96,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-sensoroni': 'so-sensoroni':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-soc': 'so-soc':
final_octet: 34 final_octet: 34
port_bindings: port_bindings:
@@ -133,19 +108,16 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-backend': 'so-strelka-backend':
final_octet: 36 final_octet: 36
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-filestream': 'so-strelka-filestream':
final_octet: 37 final_octet: 37
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-frontend': 'so-strelka-frontend':
final_octet: 38 final_octet: 38
port_bindings: port_bindings:
@@ -153,13 +125,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-manager': 'so-strelka-manager':
final_octet: 39 final_octet: 39
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-gatekeeper': 'so-strelka-gatekeeper':
final_octet: 40 final_octet: 40
port_bindings: port_bindings:
@@ -167,7 +137,6 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-strelka-coordinator': 'so-strelka-coordinator':
final_octet: 41 final_octet: 41
port_bindings: port_bindings:
@@ -175,13 +144,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-elastalert': 'so-elastalert':
final_octet: 42 final_octet: 42
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-elastic-fleet-package-registry': 'so-elastic-fleet-package-registry':
final_octet: 44 final_octet: 44
port_bindings: port_bindings:
@@ -189,13 +156,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-idh': 'so-idh':
final_octet: 45 final_octet: 45
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-elastic-agent': 'so-elastic-agent':
final_octet: 46 final_octet: 46
port_bindings: port_bindings:
@@ -204,34 +169,28 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []
'so-telegraf': 'so-telegraf':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: [] 'so-steno':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-suricata': 'so-suricata':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: ulimits:
- name: memlock - memlock=524288000
soft: 524288000
hard: 524288000
'so-zeek': 'so-zeek':
final_octet: 99 final_octet: 99
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits:
- name: core
soft: 0
hard: 0
- name: nofile
soft: 1048576
hard: 1048576
'so-kafka': 'so-kafka':
final_octet: 88 final_octet: 88
port_bindings: port_bindings:
@@ -242,4 +201,3 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
ulimits: []

View File

@@ -1,8 +1,8 @@
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %} {% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% set DOCKERMERGED = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %} {% set DOCKER = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
{% set RANGESPLIT = DOCKERMERGED.range.split('.') %} {% set RANGESPLIT = DOCKER.range.split('.') %}
{% set FIRSTTHREE = RANGESPLIT[0] ~ '.' ~ RANGESPLIT[1] ~ '.' ~ RANGESPLIT[2] ~ '.' %} {% set FIRSTTHREE = RANGESPLIT[0] ~ '.' ~ RANGESPLIT[1] ~ '.' ~ RANGESPLIT[2] ~ '.' %}
{% for container, vals in DOCKERMERGED.containers.items() %} {% for container, vals in DOCKER.containers.items() %}
{% do DOCKERMERGED.containers[container].update({'ip': FIRSTTHREE ~ DOCKERMERGED.containers[container].final_octet}) %} {% do DOCKER.containers[container].update({'ip': FIRSTTHREE ~ DOCKER.containers[container].final_octet}) %}
{% endfor %} {% endfor %}

View File

@@ -1,24 +0,0 @@
{% from 'docker/docker.map.jinja' import DOCKERMERGED -%}
{
"registry-mirrors": [
"https://:5000"
],
"bip": "172.17.0.1/24",
"default-address-pools": [
{
"base": "172.17.0.0/24",
"size": 24
}
]
{%- if DOCKERMERGED.ulimits %},
"default-ulimits": {
{%- for ULIMIT in DOCKERMERGED.ulimits %}
"{{ ULIMIT.name }}": {
"Name": "{{ ULIMIT.name }}",
"Soft": {{ ULIMIT.soft }},
"Hard": {{ ULIMIT.hard }}
}{{ "," if not loop.last else "" }}
{%- endfor %}
}
{%- endif %}
}

View File

@@ -3,7 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# docker service requires the ca.crt # docker service requires the ca.crt
@@ -15,6 +15,39 @@ dockergroup:
- name: docker - name: docker
- gid: 920 - gid: 920
{% if GLOBALS.os_family == 'Debian' %}
{% if grains.oscodename == 'bookworm' %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 2.2.1-1~debian.12~bookworm
- docker-ce: 5:29.2.1-1~debian.12~bookworm
- docker-ce-cli: 5:29.2.1-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:29.2.1-1~debian.12~bookworm
- hold: True
- update_holds: True
{% elif grains.oscodename == 'jammy' %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 2.2.1-1~ubuntu.22.04~jammy
- docker-ce: 5:29.2.1-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:29.2.1-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:29.2.1-1~ubuntu.22.04~jammy
- hold: True
- update_holds: True
{% else %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.7.21-1
- docker-ce: 5:27.2.0-1~ubuntu.20.04~focal
- docker-ce-cli: 5:27.2.0-1~ubuntu.20.04~focal
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.20.04~focal
- hold: True
- update_holds: True
{% endif %}
{% else %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
@@ -24,6 +57,7 @@ dockerheldpackages:
- docker-ce-rootless-extras: 29.2.1-1.el9 - docker-ce-rootless-extras: 29.2.1-1.el9
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %}
#disable docker from managing iptables #disable docker from managing iptables
iptables_disabled: iptables_disabled:
@@ -41,9 +75,10 @@ dockeretc:
file.directory: file.directory:
- name: /etc/docker - name: /etc/docker
# Manager daemon.json
docker_daemon: docker_daemon:
file.managed: file.managed:
- source: salt://docker/files/daemon.json.jinja - source: salt://common/files/daemon.json
- name: /etc/docker/daemon.json - name: /etc/docker/daemon.json
- template: jinja - template: jinja
@@ -74,8 +109,8 @@ dockerreserveports:
sos_docker_net: sos_docker_net:
docker_network.present: docker_network.present:
- name: sobridge - name: sobridge
- subnet: {{ DOCKERMERGED.range }} - subnet: {{ DOCKER.range }}
- gateway: {{ DOCKERMERGED.gateway }} - gateway: {{ DOCKER.gateway }}
- options: - options:
com.docker.network.bridge.name: 'sobridge' com.docker.network.bridge.name: 'sobridge'
com.docker.network.driver.mtu: '1500' com.docker.network.driver.mtu: '1500'

View File

@@ -7,25 +7,6 @@ docker:
description: Default docker IP range for containers. description: Default docker IP range for containers.
helpLink: docker.html helpLink: docker.html
advanced: True advanced: True
ulimits:
description: |
Default ulimit settings applied to all containers via the Docker daemon. Each entry specifies a resource name (e.g. nofile, memlock, core, nproc) with soft and hard limits. Individual container ulimits override these defaults. Valid resource names include: cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime.
forcedType: "[]{}"
syntax: json
advanced: True
helpLink: docker.html
uiElements:
- field: name
label: Resource Name
required: True
regex: ^(cpu|fsize|data|stack|core|rss|nproc|nofile|memlock|as|locks|sigpending|msgqueue|nice|rtprio|rttime)$
regexFailureMessage: You must enter a valid ulimit name (cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime).
- field: soft
label: Soft Limit
forcedType: int
- field: hard
label: Hard Limit
forcedType: int
containers: containers:
so-dockerregistry: &dockerOptions so-dockerregistry: &dockerOptions
final_octet: final_octet:
@@ -58,25 +39,6 @@ docker:
helpLink: docker.html helpLink: docker.html
multiline: True multiline: True
forcedType: "[]string" forcedType: "[]string"
ulimits:
description: |
Ulimit settings for the container. Each entry specifies a resource name (e.g. nofile, memlock, core, nproc) with optional soft and hard limits. Valid resource names include: cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime.
advanced: True
helpLink: docker.html
forcedType: "[]{}"
syntax: json
uiElements:
- field: name
label: Resource Name
required: True
regex: ^(cpu|fsize|data|stack|core|rss|nproc|nofile|memlock|as|locks|sigpending|msgqueue|nice|rtprio|rttime)$
regexFailureMessage: You must enter a valid ulimit name (cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime).
- field: soft
label: Soft Limit
forcedType: int
- field: hard
label: Hard Limit
forcedType: int
so-elastic-fleet: *dockerOptions so-elastic-fleet: *dockerOptions
so-elasticsearch: *dockerOptions so-elasticsearch: *dockerOptions
so-influxdb: *dockerOptions so-influxdb: *dockerOptions
@@ -100,6 +62,43 @@ docker:
so-idh: *dockerOptions so-idh: *dockerOptions
so-elastic-agent: *dockerOptions so-elastic-agent: *dockerOptions
so-telegraf: *dockerOptions so-telegraf: *dockerOptions
so-suricata: *dockerOptions so-steno: *dockerOptions
so-suricata:
final_octet:
description: Last octet of the container IP address.
helpLink: docker.html
readonly: True
advanced: True
global: True
port_bindings:
description: List of port bindings for the container.
helpLink: docker.html
advanced: True
multiline: True
forcedType: "[]string"
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
ulimits:
description: Ulimits for the container, in bytes.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions so-zeek: *dockerOptions
so-kafka: *dockerOptions so-kafka: *dockerOptions

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- elastalert.config - elastalert.config
@@ -24,7 +24,7 @@ so-elastalert:
- user: so-elastalert - user: so-elastalert
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-elastalert'].ip }} - ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
- detach: True - detach: True
- binds: - binds:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
@@ -33,30 +33,24 @@ so-elastalert:
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro - /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro - /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKERMERGED.containers['so-elastalert'].custom_bind_mounts %} {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-elastalert'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKERMERGED.containers['so-elastalert'].extra_hosts %} {% if DOCKER.containers['so-elastalert'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-elastalert'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastalert'].extra_env %} {% if DOCKER.containers['so-elastalert'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-elastalert'].extra_env %} {% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastalert'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-elastalert'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- require: - require:
- cmd: wait_for_elasticsearch - cmd: wait_for_elasticsearch
- file: elastarules - file: elastarules

View File

@@ -0,0 +1 @@
THIS IS A PLACEHOLDER FILE

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- elastic-fleet-package-registry.config - elastic-fleet-package-registry.config
@@ -21,36 +21,30 @@ so-elastic-fleet-package-registry:
- user: 948 - user: 948
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ip }} - ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_hosts %} {% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].port_bindings %} {% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %} {% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- binds: - binds:
{% for BIND in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_env %} {% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_env %} {% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
delete_so-elastic-fleet-package-registry_so-status.disabled: delete_so-elastic-fleet-package-registry_so-status.disabled:
file.uncomment: file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ca - ca
@@ -22,17 +22,17 @@ so-elastic-agent:
- user: 949 - user: 949
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-agent'].ip }} - ipv4_address: {{ DOCKER.containers['so-elastic-agent'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKERMERGED.containers['so-elastic-agent'].extra_hosts %} {% if DOCKER.containers['so-elastic-agent'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-agent'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-elastic-agent'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-elastic-agent'].port_bindings %} {% for BINDING in DOCKER.containers['so-elastic-agent'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -41,25 +41,19 @@ so-elastic-agent:
- /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro - /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
- /nsm:/nsm:ro - /nsm:/nsm:ro
- /opt/so/log:/opt/so/log:ro - /opt/so/log:/opt/so/log:ro
{% if DOCKERMERGED.containers['so-elastic-agent'].custom_bind_mounts %} {% if DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-elastic-agent'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- environment: - environment:
- FLEET_CA=/etc/pki/tls/certs/intca.crt - FLEET_CA=/etc/pki/tls/certs/intca.crt
- LOGS_PATH=logs - LOGS_PATH=logs
{% if DOCKERMERGED.containers['so-elastic-agent'].extra_env %} {% if DOCKER.containers['so-elastic-agent'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-agent'].extra_env %} {% for XTRAENV in DOCKER.containers['so-elastic-agent'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastic-agent'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-agent'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- require: - require:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca - file: trusttheca

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} {% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{# This value is generated during node install and stored in minion pillar #} {# This value is generated during node install and stored in minion pillar #}
@@ -94,17 +94,17 @@ so-elastic-fleet:
- user: 947 - user: 947
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-fleet'].ip }} - ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKERMERGED.containers['so-elastic-fleet'].extra_hosts %} {% if DOCKER.containers['so-elastic-fleet'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-fleet'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-elastic-fleet'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-elastic-fleet'].port_bindings %} {% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -112,8 +112,8 @@ so-elastic-fleet:
- /etc/pki/elasticfleet-server.key:/etc/pki/elasticfleet-server.key:ro - /etc/pki/elasticfleet-server.key:/etc/pki/elasticfleet-server.key:ro
- /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro - /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
- /opt/so/log/elasticfleet:/usr/share/elastic-agent/logs - /opt/so/log/elasticfleet:/usr/share/elastic-agent/logs
{% if DOCKERMERGED.containers['so-elastic-fleet'].custom_bind_mounts %} {% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-elastic-fleet'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -128,17 +128,11 @@ so-elastic-fleet:
- FLEET_CA=/etc/pki/tls/certs/intca.crt - FLEET_CA=/etc/pki/tls/certs/intca.crt
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt - FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt
- LOGS_PATH=logs - LOGS_PATH=logs
{% if DOCKERMERGED.containers['so-elastic-fleet'].extra_env %} {% if DOCKER.containers['so-elastic-fleet'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-fleet'].extra_env %} {% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elastic-fleet'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-fleet'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: trusttheca - file: trusttheca
- x509: etc_elasticfleet_key - x509: etc_elasticfleet_key

View File

@@ -6,6 +6,8 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %} {% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #} {# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #}
{% set ELASTICSEARCH_SEED_HOSTS = [] %} {% set ELASTICSEARCH_SEED_HOSTS = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %} {% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
@@ -34,8 +36,14 @@
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% elif grains.id.split('_') | last == 'searchnode' %} {% elif grains.id.split('_') | last == 'searchnode' %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
{% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
{% endif %} {% endif %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
{% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'name': GLOBALS.hostname}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'name': GLOBALS.hostname}) %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.cluster.update({'name': GLOBALS.hostname}) %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.cluster.update({'name': GLOBALS.hostname}) %}

View File

@@ -98,6 +98,10 @@ esrolesdir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
eslibdir:
file.absent:
- name: /opt/so/conf/elasticsearch/lib
esingestdynamicconf: esingestdynamicconf:
file.recurse: file.recurse:
- name: /opt/so/conf/elasticsearch/ingest - name: /opt/so/conf/elasticsearch/ingest
@@ -115,6 +119,11 @@ esingestconf:
- group: 939 - group: 939
- show_changes: False - show_changes: False
# Remove .fleet_final_pipeline-1 because we are using global@custom now
so-fleet-final-pipeline-remove:
file.absent:
- name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1
# Auto-generate Elasticsearch ingest node pipelines from pillar # Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %} {% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}: es_ingest_conf_{{pipeline}}:

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
@@ -28,15 +28,15 @@ so-elasticsearch:
- user: elasticsearch - user: elasticsearch
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-elasticsearch'].ip }} - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts: - extra_hosts:
{% for node in ELASTICSEARCH_NODES %} {% for node in ELASTICSEARCH_NODES %}
{% for hostname, ip in node.items() %} {% for hostname, ip in node.items() %}
- {{hostname}}:{{ip}} - {{hostname}}:{{ip}}
{% endfor %} {% endfor %}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-elasticsearch'].extra_hosts %} {% if DOCKER.containers['so-elasticsearch'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-elasticsearch'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-elasticsearch'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -45,19 +45,17 @@ so-elasticsearch:
- discovery.type=single-node - discovery.type=single-node
{% endif %} {% endif %}
- ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true - ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true
{% if DOCKERMERGED.containers['so-elasticsearch'].extra_env %} ulimits:
{% for XTRAENV in DOCKERMERGED.containers['so-elasticsearch'].extra_env %} - memlock=-1:-1
- nofile=65536:65536
- nproc=4096
{% if DOCKER.containers['so-elasticsearch'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elasticsearch'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elasticsearch'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-elasticsearch'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-elasticsearch'].port_bindings %} {% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -77,8 +75,8 @@ so-elasticsearch:
- {{ repo }}:{{ repo }}:rw - {{ repo }}:{{ repo }}:rw
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-elasticsearch'].custom_bind_mounts %} {% if DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-elasticsearch'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}

View File

@@ -1,3 +1,5 @@
{%- set HIGHLANDER = salt['pillar.get']('global:highlander', False) -%}
{%- raw -%}
{ {
"description" : "common", "description" : "common",
"processors" : [ "processors" : [
@@ -65,7 +67,19 @@
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } }, { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} }, { "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}
,
{
"pipeline": {
"name": "ecs"
}
}
{%- endif %}
{%- raw %}
,
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } } { "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
] ]
} }
{% endraw %}

View File

@@ -27,12 +27,14 @@ iptables_config:
- source: salt://firewall/iptables.jinja - source: salt://firewall/iptables.jinja
- template: jinja - template: jinja
{% if grains.os_family == 'RedHat' %}
disable_firewalld: disable_firewalld:
service.dead: service.dead:
- name: firewalld - name: firewalld
- enable: False - enable: False
- require: - require:
- file: iptables_config - file: iptables_config
{% endif %}
iptables_restore: iptables_restore:
cmd.run: cmd.run:
@@ -42,6 +44,7 @@ iptables_restore:
- onlyif: - onlyif:
- iptables-restore --test {{ iptmap.configfile }} - iptables-restore --test {{ iptmap.configfile }}
{% if grains.os_family == 'RedHat' %}
enable_firewalld: enable_firewalld:
service.running: service.running:
- name: firewalld - name: firewalld
@@ -49,6 +52,7 @@ enable_firewalld:
- onfail: - onfail:
- file: iptables_config - file: iptables_config
- cmd: iptables_restore - cmd: iptables_restore
{% endif %}
{% else %} {% else %}

View File

@@ -1,6 +1,14 @@
{% set iptmap = { {% set iptmap = salt['grains.filter_by']({
'service': 'iptables', 'Debian': {
'iptpkg': 'iptables-nft', 'service': 'netfilter-persistent',
'persistpkg': 'iptables-nft-services', 'iptpkg': 'iptables',
'configfile': '/etc/sysconfig/iptables' 'persistpkg': 'iptables-persistent',
} %} 'configfile': '/etc/iptables/rules.v4'
},
'RedHat': {
'service': 'iptables',
'iptpkg': 'iptables-nft',
'persistpkg': 'iptables-nft-services',
'configfile': '/etc/sysconfig/iptables'
},
}) %}

View File

@@ -1,5 +1,5 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %} {%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'docker/docker.map.jinja' import DOCKERMERGED %} {%- from 'docker/docker.map.jinja' import DOCKER %}
{%- from 'firewall/map.jinja' import FIREWALL_MERGED %} {%- from 'firewall/map.jinja' import FIREWALL_MERGED %}
{%- set role = GLOBALS.role.split('-')[1] %} {%- set role = GLOBALS.role.split('-')[1] %}
{%- from 'firewall/containers.map.jinja' import NODE_CONTAINERS %} {%- from 'firewall/containers.map.jinja' import NODE_CONTAINERS %}
@@ -8,9 +8,9 @@
{%- set D1 = [] %} {%- set D1 = [] %}
{%- set D2 = [] %} {%- set D2 = [] %}
{%- for container in NODE_CONTAINERS %} {%- for container in NODE_CONTAINERS %}
{%- set IP = DOCKERMERGED.containers[container].ip %} {%- set IP = DOCKER.containers[container].ip %}
{%- if DOCKERMERGED.containers[container].port_bindings is defined %} {%- if DOCKER.containers[container].port_bindings is defined %}
{%- for binding in DOCKERMERGED.containers[container].port_bindings %} {%- for binding in DOCKER.containers[container].port_bindings %}
{#- cant split int so we convert to string #} {#- cant split int so we convert to string #}
{%- set binding = binding|string %} {%- set binding = binding|string %}
{#- split the port binding by /. if proto not specified, default is tcp #} {#- split the port binding by /. if proto not specified, default is tcp #}
@@ -33,13 +33,13 @@
{%- set hostPort = bsa[0] %} {%- set hostPort = bsa[0] %}
{%- set containerPort = bsa[1] %} {%- set containerPort = bsa[1] %}
{%- endif %} {%- endif %}
{%- do PR.append("-A POSTROUTING -s " ~ DOCKERMERGED.containers[container].ip ~ "/32 -d " ~ DOCKERMERGED.containers[container].ip ~ "/32 -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j MASQUERADE") %} {%- do PR.append("-A POSTROUTING -s " ~ DOCKER.containers[container].ip ~ "/32 -d " ~ DOCKER.containers[container].ip ~ "/32 -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j MASQUERADE") %}
{%- if bindip | length and bindip != '0.0.0.0' %} {%- if bindip | length and bindip != '0.0.0.0' %}
{%- do D1.append("-A DOCKER -d " ~ bindip ~ "/32 ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKERMERGED.containers[container].ip ~ ":" ~ containerPort) %} {%- do D1.append("-A DOCKER -d " ~ bindip ~ "/32 ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKER.containers[container].ip ~ ":" ~ containerPort) %}
{%- else %} {%- else %}
{%- do D1.append("-A DOCKER ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKERMERGED.containers[container].ip ~ ":" ~ containerPort) %} {%- do D1.append("-A DOCKER ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKER.containers[container].ip ~ ":" ~ containerPort) %}
{%- endif %} {%- endif %}
{%- do D2.append("-A DOCKER -d " ~ DOCKERMERGED.containers[container].ip ~ "/32 ! -i sobridge -o sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j ACCEPT") %} {%- do D2.append("-A DOCKER -d " ~ DOCKER.containers[container].ip ~ "/32 ! -i sobridge -o sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j ACCEPT") %}
{%- endfor %} {%- endfor %}
{%- endif %} {%- endif %}
{%- endfor %} {%- endfor %}
@@ -52,7 +52,7 @@
:DOCKER - [0:0] :DOCKER - [0:0]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s {{DOCKERMERGED.range}} ! -o sobridge -j MASQUERADE -A POSTROUTING -s {{DOCKER.range}} ! -o sobridge -j MASQUERADE
{%- for rule in PR %} {%- for rule in PR %}
{{ rule }} {{ rule }}
{%- endfor %} {%- endfor %}

View File

@@ -1,11 +1,11 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %} {% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}
{# add our ip to self #} {# add our ip to self #}
{% do FIREWALL_DEFAULT.firewall.hostgroups.self.append(GLOBALS.node_ip) %} {% do FIREWALL_DEFAULT.firewall.hostgroups.self.append(GLOBALS.node_ip) %}
{# add dockernet range #} {# add dockernet range #}
{% do FIREWALL_DEFAULT.firewall.hostgroups.dockernet.append(DOCKERMERGED.range) %} {% do FIREWALL_DEFAULT.firewall.hostgroups.dockernet.append(DOCKER.range) %}
{% if GLOBALS.role == 'so-idh' %} {% if GLOBALS.role == 'so-idh' %}
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %} {% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}

View File

@@ -1,3 +1,3 @@
global: global:
pcapengine: SURICATA pcapengine: STENO
pipeline: REDIS pipeline: REDIS

View File

@@ -18,11 +18,13 @@ global:
regexFailureMessage: You must enter either ZEEK or SURICATA. regexFailureMessage: You must enter either ZEEK or SURICATA.
global: True global: True
pcapengine: pcapengine:
description: Which engine to use for generating pcap. Currently only SURICATA is supported. description: Which engine to use for generating pcap. Options are STENO, SURICATA or TRANSITION.
regex: ^(SURICATA)$ regex: ^(STENO|SURICATA|TRANSITION)$
options: options:
- STENO
- SURICATA - SURICATA
regexFailureMessage: You must enter either SURICATA. - TRANSITION
regexFailureMessage: You must enter either STENO, SURICATA or TRANSITION.
global: True global: True
ids: ids:
description: Which IDS engine to use. Currently only Suricata is supported. description: Which IDS engine to use. Currently only Suricata is supported.

View File

@@ -11,7 +11,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% if 'api' in salt['pillar.get']('features', []) %} {% if 'api' in salt['pillar.get']('features', []) %}
@@ -26,38 +26,32 @@ so-hydra:
- name: so-hydra - name: so-hydra
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-hydra'].ip }} - ipv4_address: {{ DOCKER.containers['so-hydra'].ip }}
- binds: - binds:
- /opt/so/conf/hydra/:/hydra-conf:ro - /opt/so/conf/hydra/:/hydra-conf:ro
- /opt/so/log/hydra/:/hydra-log:rw - /opt/so/log/hydra/:/hydra-log:rw
- /nsm/hydra/db:/hydra-data:rw - /nsm/hydra/db:/hydra-data:rw
{% if DOCKERMERGED.containers['so-hydra'].custom_bind_mounts %} {% if DOCKER.containers['so-hydra'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-hydra'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-hydra'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-hydra'].port_bindings %} {% for BINDING in DOCKER.containers['so-hydra'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-hydra'].extra_hosts %} {% if DOCKER.containers['so-hydra'].extra_hosts %}
- extra_hosts: - extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-hydra'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-hydra'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-hydra'].extra_env %} {% if DOCKER.containers['so-hydra'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-hydra'].extra_env %} {% for XTRAENV in DOCKER.containers['so-hydra'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-hydra'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-hydra'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- restart_policy: unless-stopped - restart_policy: unless-stopped
- watch: - watch:
- file: hydraconfig - file: hydraconfig
@@ -73,7 +67,7 @@ delete_so-hydra_so-status.disabled:
wait_for_hydra: wait_for_hydra:
http.wait_for_successful_query: http.wait_for_successful_query:
- name: 'http://{{ GLOBALS.manager }}:4444/health/alive' - name: 'http://{{ GLOBALS.manager }}:4444/'
- ssl: True - ssl: True
- verify_ssl: False - verify_ssl: False
- status: - status:

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- idh.config - idh.config
@@ -22,29 +22,23 @@ so-idh:
- /nsm/idh:/var/tmp:rw - /nsm/idh:/var/tmp:rw
- /opt/so/conf/idh/http-skins:/usr/local/lib/python3.12/site-packages/opencanary/modules/data/http/skin:ro - /opt/so/conf/idh/http-skins:/usr/local/lib/python3.12/site-packages/opencanary/modules/data/http/skin:ro
- /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro - /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro
{% if DOCKERMERGED.containers['so-idh'].custom_bind_mounts %} {% if DOCKER.containers['so-idh'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-idh'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-idh'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-idh'].extra_hosts %} {% if DOCKER.containers['so-idh'].extra_hosts %}
- extra_hosts: - extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-idh'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-idh'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-idh'].extra_env %} {% if DOCKER.containers['so-idh'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-idh'].extra_env %} {% for XTRAENV in DOCKER.containers['so-idh'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-idh'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-idh'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: opencanary_config - file: opencanary_config
- require: - require:

View File

@@ -3,6 +3,7 @@
include: include:
- idh.openssh - idh.openssh
{% if grains.os_family == 'RedHat' %}
idh_sshd_selinux: idh_sshd_selinux:
selinux.port_policy_present: selinux.port_policy_present:
- port: {{ openssh_map.config.port }} - port: {{ openssh_map.config.port }}
@@ -12,6 +13,7 @@ idh_sshd_selinux:
- file: openssh_config - file: openssh_config
- require: - require:
- pkg: python_selinux_mgmt_tools - pkg: python_selinux_mgmt_tools
{% endif %}
openssh_config: openssh_config:
file.replace: file.replace:

View File

@@ -16,6 +16,8 @@ openssh:
- name: {{ openssh_map.service }} - name: {{ openssh_map.service }}
{% endif %} {% endif %}
{% if grains.os_family == 'RedHat' %}
python_selinux_mgmt_tools: python_selinux_mgmt_tools:
pkg.installed: pkg.installed:
- name: policycoreutils-python-utils - name: policycoreutils-python-utils
{% endif %}

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% set PASSWORD = salt['pillar.get']('secrets:influx_pass') %} {% set PASSWORD = salt['pillar.get']('secrets:influx_pass') %}
{% set TOKEN = salt['pillar.get']('influxdb:token') %} {% set TOKEN = salt['pillar.get']('influxdb:token') %}
@@ -21,7 +21,7 @@ so-influxdb:
- hostname: influxdb - hostname: influxdb
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-influxdb'].ip }} - ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment: - environment:
- INFLUXD_CONFIG_PATH=/conf/config.yaml - INFLUXD_CONFIG_PATH=/conf/config.yaml
- INFLUXDB_HTTP_LOG_ENABLED=false - INFLUXDB_HTTP_LOG_ENABLED=false
@@ -31,8 +31,8 @@ so-influxdb:
- DOCKER_INFLUXDB_INIT_ORG=Security Onion - DOCKER_INFLUXDB_INIT_ORG=Security Onion
- DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term - DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }} - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }}
{% if DOCKERMERGED.containers['so-influxdb'].extra_env %} {% if DOCKER.containers['so-influxdb'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-influxdb'].extra_env %} {% for XTRAENV in DOCKER.containers['so-influxdb'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -43,27 +43,21 @@ so-influxdb:
- /nsm/influxdb:/var/lib/influxdb2:rw - /nsm/influxdb:/var/lib/influxdb2:rw
- /etc/pki/influxdb.crt:/conf/influxdb.crt:ro - /etc/pki/influxdb.crt:/conf/influxdb.crt:ro
- /etc/pki/influxdb.key:/conf/influxdb.key:ro - /etc/pki/influxdb.key:/conf/influxdb.key:ro
{% if DOCKERMERGED.containers['so-influxdb'].custom_bind_mounts %} {% if DOCKER.containers['so-influxdb'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-influxdb'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-influxdb'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-influxdb'].port_bindings %} {% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-influxdb'].extra_hosts %} {% if DOCKER.containers['so-influxdb'].extra_hosts %}
- extra_hosts: - extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-influxdb'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-influxdb'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-influxdb'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-influxdb'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: influxdbconf - file: influxdbconf
- x509: influxdb_key - x509: influxdb_key

View File

@@ -0,0 +1,27 @@
[{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "CheckThreshold",
"metadata": {
"name": "steno-packet-loss"
},
"spec": {
"description": "Triggers when the average percent of packet loss is above the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
"every": "1m",
"name": "Stenographer Packet Loss",
"query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"stenodrop\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"drop\")\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
"status": "active",
"statusMessageTemplate": "Stenographer Packet Loss on node ${r.host} has reached the ${ r._level } threshold. The current packet loss is ${ r.drop }%.",
"thresholds": [
{
"level": "CRIT",
"type": "greater",
"value": 5
},
{
"level": "WARN",
"type": "greater",
"value": 3
}
]
}
}]

File diff suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} {% set KAFKANODES = salt['pillar.get']('kafka:nodes') %}
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %} {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
{% if 'gmd' in salt['pillar.get']('features', []) %} {% if 'gmd' in salt['pillar.get']('features', []) %}
@@ -31,22 +31,22 @@ so-kafka:
- name: so-kafka - name: so-kafka
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-kafka'].ip }} - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
- user: kafka - user: kafka
- environment: - environment:
KAFKA_HEAP_OPTS: -Xmx2G -Xms1G KAFKA_HEAP_OPTS: -Xmx2G -Xms1G
KAFKA_OPTS: "-javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKERMERGED.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml {%- if KAFKA_EXTERNAL_ACCESS %} -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf {% endif -%}" KAFKA_OPTS: "-javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml {%- if KAFKA_EXTERNAL_ACCESS %} -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf {% endif -%}"
- extra_hosts: - extra_hosts:
{% for node in KAFKANODES %} {% for node in KAFKANODES %}
- {{ node }}:{{ KAFKANODES[node].ip }} - {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-kafka'].extra_hosts %} {% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-kafka'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-kafka'].port_bindings %} {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -60,12 +60,6 @@ so-kafka:
{% if KAFKA_EXTERNAL_ACCESS %} {% if KAFKA_EXTERNAL_ACCESS %}
- /opt/so/conf/kafka/kafka_server_jaas.conf:/opt/kafka/config/kafka_server_jaas.conf:ro - /opt/so/conf/kafka/kafka_server_jaas.conf:/opt/kafka/config/kafka_server_jaas.conf:ro
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-kafka'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-kafka'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
{% for sc in ['server', 'client'] %} {% for sc in ['server', 'client'] %}
- file: kafka_kraft_{{sc}}_properties - file: kafka_kraft_{{sc}}_properties

View File

@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
@@ -20,20 +20,20 @@ so-kibana:
- user: kibana - user: kibana
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-kibana'].ip }} - ipv4_address: {{ DOCKER.containers['so-kibana'].ip }}
- environment: - environment:
- ELASTICSEARCH_HOST={{ GLOBALS.manager }} - ELASTICSEARCH_HOST={{ GLOBALS.manager }}
- ELASTICSEARCH_PORT=9200 - ELASTICSEARCH_PORT=9200
- MANAGER={{ GLOBALS.manager }} - MANAGER={{ GLOBALS.manager }}
{% if DOCKERMERGED.containers['so-kibana'].extra_env %} {% if DOCKER.containers['so-kibana'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-kibana'].extra_env %} {% for XTRAENV in DOCKER.containers['so-kibana'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKERMERGED.containers['so-kibana'].extra_hosts %} {% if DOCKER.containers['so-kibana'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-kibana'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-kibana'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -42,21 +42,15 @@ so-kibana:
- /opt/so/log/kibana:/var/log/kibana:rw - /opt/so/log/kibana:/var/log/kibana:rw
- /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro - /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro
- /sys/fs/cgroup:/sys/fs/cgroup:ro - /sys/fs/cgroup:/sys/fs/cgroup:ro
{% if DOCKERMERGED.containers['so-kibana'].custom_bind_mounts %} {% if DOCKER.containers['so-kibana'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-kibana'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-kibana'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-kibana'].port_bindings %} {% for BINDING in DOCKER.containers['so-kibana'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-kibana'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-kibana'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: kibanaconfig - file: kibanaconfig

View File

@@ -5,6 +5,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'kibana/defaults.yaml' as KIBANADEFAULTS with context %} {% import_yaml 'kibana/defaults.yaml' as KIBANADEFAULTS with context %}
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{% do KIBANADEFAULTS.kibana.config.server.update({'publicBaseUrl': 'https://' ~ GLOBALS.url_base ~ '/kibana'}) %} {% do KIBANADEFAULTS.kibana.config.server.update({'publicBaseUrl': 'https://' ~ GLOBALS.url_base ~ '/kibana'}) %}
{% do KIBANADEFAULTS.kibana.config.elasticsearch.update({'hosts': ['https://' ~ GLOBALS.manager ~ ':9200']}) %} {% do KIBANADEFAULTS.kibana.config.elasticsearch.update({'hosts': ['https://' ~ GLOBALS.manager ~ ':9200']}) %}

View File

@@ -3,6 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
include: include:
- kibana.enabled - kibana.enabled
@@ -28,3 +29,27 @@ so-kibana-dashboard-load:
- require: - require:
- sls: kibana.enabled - sls: kibana.enabled
- file: dashboard_saved_objects_template - file: dashboard_saved_objects_template
{%- if HIGHLANDER %}
dashboard_saved_objects_template_hl:
file.managed:
- name: /opt/so/conf/kibana/hl.ndjson.template
- source: salt://kibana/files/hl.ndjson
- user: 932
- group: 939
- show_changes: False
dashboard_saved_objects_hl_changes:
file.absent:
- names:
- /opt/so/state/kibana_hl.txt
- onchanges:
- file: dashboard_saved_objects_template_hl
so-kibana-dashboard-load_hl:
cmd.run:
- name: /usr/sbin/so-kibana-config-load -i /opt/so/conf/kibana/hl.ndjson.template
- cwd: /opt/so
- require:
- sls: kibana.enabled
- file: dashboard_saved_objects_template_hl
{%- endif %}

View File

@@ -1,5 +1,6 @@
#!/bin/bash #!/bin/bash
. /usr/sbin/so-common . /usr/sbin/so-common
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
## This hackery will be removed if using Elastic Auth ## ## This hackery will be removed if using Elastic Auth ##
@@ -8,6 +9,10 @@ SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http:
# Disable certain Features from showing up in the Kibana UI # Disable certain Features from showing up in the Kibana UI
echo echo
echo "Setting up default Kibana Space:" echo "Setting up default Space:"
{% if HIGHLANDER %}
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
{% else %}
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log
{% endif %}
echo echo

View File

@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
@@ -19,38 +19,32 @@ so-kratos:
- name: so-kratos - name: so-kratos
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-kratos'].ip }} - ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
- binds: - binds:
- /opt/so/conf/kratos/:/kratos-conf:ro - /opt/so/conf/kratos/:/kratos-conf:ro
- /opt/so/log/kratos/:/kratos-log:rw - /opt/so/log/kratos/:/kratos-log:rw
- /nsm/kratos/db:/kratos-data:rw - /nsm/kratos/db:/kratos-data:rw
{% if DOCKERMERGED.containers['so-kratos'].custom_bind_mounts %} {% if DOCKER.containers['so-kratos'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-kratos'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-kratos'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-kratos'].port_bindings %} {% for BINDING in DOCKER.containers['so-kratos'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-kratos'].extra_hosts %} {% if DOCKER.containers['so-kratos'].extra_hosts %}
- extra_hosts: - extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-kratos'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-kratos'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-kratos'].extra_env %} {% if DOCKER.containers['so-kratos'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-kratos'].extra_env %} {% for XTRAENV in DOCKER.containers['so-kratos'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-kratos'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-kratos'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- restart_policy: unless-stopped - restart_policy: unless-stopped
- watch: - watch:
- file: kratosschema - file: kratosschema

View File

@@ -180,6 +180,16 @@ logrotate:
- extension .log - extension .log
- dateext - dateext
- dateyesterday - dateyesterday
/opt/so/log/stenographer/*_x_log:
- daily
- rotate 14
- missingok
- copytruncate
- compress
- create
- extension .log
- dateext
- dateyesterday
/opt/so/log/salt/so-salt-minion-check: /opt/so/log/salt/so-salt-minion-check:
- daily - daily
- rotate 14 - rotate 14

View File

@@ -112,6 +112,13 @@ logrotate:
multiline: True multiline: True
global: True global: True
forcedType: "[]string" forcedType: "[]string"
"/opt/so/log/stenographer/*_x_log":
description: List of logrotate options for this file.
title: /opt/so/log/stenographer/*.log
advanced: True
multiline: True
global: True
forcedType: "[]string"
"/opt/so/log/salt/so-salt-minion-check": "/opt/so/log/salt/so-salt-minion-check":
description: List of logrotate options for this file. description: List of logrotate options for this file.
title: /opt/so/log/salt/so-salt-minion-check title: /opt/so/log/salt/so-salt-minion-check

View File

@@ -36,6 +36,10 @@ logstash:
- gid: 931 - gid: 931
- home: /opt/so/conf/logstash - home: /opt/so/conf/logstash
lslibdir:
file.absent:
- name: /opt/so/conf/logstash/lib
logstash_sbin: logstash_sbin:
file.recurse: file.recurse:
- name: /usr/sbin - name: /usr/sbin

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %} {% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %} {% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %} {% set lsheap = LOGSTASH_MERGED.settings.lsheap %}
@@ -32,7 +32,7 @@ so-logstash:
- name: so-logstash - name: so-logstash
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-logstash'].ip }} - ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
- user: logstash - user: logstash
- extra_hosts: - extra_hosts:
{% for node in LOGSTASH_NODES %} {% for node in LOGSTASH_NODES %}
@@ -40,20 +40,20 @@ so-logstash:
- {{hostname}}:{{ip}} - {{hostname}}:{{ip}}
{% endfor %} {% endfor %}
{% endfor %} {% endfor %}
{% if DOCKERMERGED.containers['so-logstash'].extra_hosts %} {% if DOCKER.containers['so-logstash'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-logstash'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-logstash'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- environment: - environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
{% if DOCKERMERGED.containers['so-logstash'].extra_env %} {% if DOCKER.containers['so-logstash'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-logstash'].extra_env %} {% for XTRAENV in DOCKER.containers['so-logstash'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-logstash'].port_bindings %} {% for BINDING in DOCKER.containers['so-logstash'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -91,17 +91,11 @@ so-logstash:
- /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/fleet/:/osquery/logs:ro
- /opt/so/log/strelka:/strelka:ro - /opt/so/log/strelka:/strelka:ro
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-logstash'].custom_bind_mounts %} {% if DOCKER.containers['so-logstash'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-logstash'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-logstash'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-logstash'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-logstash'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: lsetcsync - file: lsetcsync
- file: trusttheca - file: trusttheca

View File

@@ -1,2 +1,2 @@
https://repo.securityonion.net/file/so-repo/prod/3/oracle/9 https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9
https://repo-alt.securityonion.net/prod/3/oracle/9 https://repo-alt.securityonion.net/prod/2.4/oracle/9

View File

@@ -63,9 +63,11 @@ yara_log_dir:
- user - user
- group - group
{% if GLOBALS.os_family == 'RedHat' %}
install_createrepo: install_createrepo:
pkg.installed: pkg.installed:
- name: createrepo_c - name: createrepo_c
{% endif %}
repo_conf_dir: repo_conf_dir:
file.directory: file.directory:

View File

@@ -134,8 +134,8 @@ function require() {
function verifyEnvironment() { function verifyEnvironment() {
require "jq" require "jq"
require "curl" require "curl"
response=$(curl -Ss -L ${hydraUrl}/health/alive) response=$(curl -Ss -L ${hydraUrl}/)
[[ "$response" != '{"status":"ok"}' ]] && fail "Unable to communicate with Hydra; specify URL via HYDRA_URL environment variable" [[ "$response" != *"Error 404"* ]] && fail "Unable to communicate with Hydra; specify URL via HYDRA_URL environment variable"
} }
function createFile() { function createFile() {

View File

@@ -462,14 +462,19 @@ function add_sensor_to_minion() {
echo " lb_procs: '$CORECOUNT'" echo " lb_procs: '$CORECOUNT'"
echo "suricata:" echo "suricata:"
echo " enabled: True " echo " enabled: True "
echo " pcap:"
echo " enabled: True"
if [[ $is_pcaplimit ]]; then if [[ $is_pcaplimit ]]; then
echo " pcap:"
echo " maxsize: $MAX_PCAP_SPACE" echo " maxsize: $MAX_PCAP_SPACE"
fi fi
echo " config:" echo " config:"
echo " af-packet:" echo " af-packet:"
echo " threads: '$CORECOUNT'" echo " threads: '$CORECOUNT'"
echo "pcap:"
echo " enabled: True"
if [[ $is_pcaplimit ]]; then
echo " config:"
echo " diskfreepercentage: $DFREEPERCENT"
fi
echo " " echo " "
} >> $PILLARFILE } >> $PILLARFILE
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then

View File

@@ -143,7 +143,7 @@ show_usage() {
echo " -v Show verbose output (files changed/added/deleted)" echo " -v Show verbose output (files changed/added/deleted)"
echo " -vv Show very verbose output (includes file diffs)" echo " -vv Show very verbose output (includes file diffs)"
echo " --test Test mode - show what would change without making changes" echo " --test Test mode - show what would change without making changes"
echo " branch Git branch to checkout (default: 3/main)" echo " branch Git branch to checkout (default: 2.4/main)"
echo "" echo ""
echo "Examples:" echo "Examples:"
echo " $0 # Normal operation" echo " $0 # Normal operation"
@@ -193,7 +193,7 @@ done
# Set default branch if not provided # Set default branch if not provided
if [ -z "$BRANCH" ]; then if [ -z "$BRANCH" ]; then
BRANCH=3/main BRANCH=2.4/main
fi fi
got_root got_root

View File

@@ -22,7 +22,7 @@ def showUsage(args):
print(' removelistitem - Remove a list item from a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr) print(' removelistitem - Remove a list item from a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
print(' replacelistobject - Replace a list object based on a condition. Requires KEY, CONDITION_FIELD, CONDITION_VALUE, and JSON_OBJECT args.', file=sys.stderr) print(' replacelistobject - Replace a list object based on a condition. Requires KEY, CONDITION_FIELD, CONDITION_VALUE, and JSON_OBJECT args.', file=sys.stderr)
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr) print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr)
print(' get [-r] - Displays (to stdout) the value stored in the given key. Requires KEY arg. Use -r for raw output without YAML formatting.', file=sys.stderr) print(' get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr)
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr) print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr)
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr) print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr)
print(' help - Prints this usage information.', file=sys.stderr) print(' help - Prints this usage information.', file=sys.stderr)
@@ -256,7 +256,7 @@ def replacelistobject(args):
def addKey(content, key, value): def addKey(content, key, value):
pieces = key.split(".", 1) pieces = key.split(".", 1)
if len(pieces) > 1: if len(pieces) > 1:
if pieces[0] not in content or content[pieces[0]] is None: if not pieces[0] in content:
content[pieces[0]] = {} content[pieces[0]] = {}
addKey(content[pieces[0]], pieces[1], value) addKey(content[pieces[0]], pieces[1], value)
elif key in content: elif key in content:
@@ -332,11 +332,6 @@ def getKeyValue(content, key):
def get(args): def get(args):
raw = False
if len(args) > 0 and args[0] == '-r':
raw = True
args = args[1:]
if len(args) != 2: if len(args) != 2:
print('Missing filename or key arg', file=sys.stderr) print('Missing filename or key arg', file=sys.stderr)
showUsage(None) showUsage(None)
@@ -351,15 +346,7 @@ def get(args):
print(f"Key '{key}' not found by so-yaml.py", file=sys.stderr) print(f"Key '{key}' not found by so-yaml.py", file=sys.stderr)
return 2 return 2
if raw: print(yaml.safe_dump(output))
if isinstance(output, bool):
print(str(output).lower())
elif isinstance(output, (dict, list)):
print(yaml.safe_dump(output).strip())
else:
print(output)
else:
print(yaml.safe_dump(output))
return 0 return 0

View File

@@ -395,17 +395,6 @@ class TestRemove(unittest.TestCase):
self.assertEqual(result, 0) self.assertEqual(result, 0)
self.assertIn("45\n...", mock_stdout.getvalue()) self.assertIn("45\n...", mock_stdout.getvalue())
def test_get_int_raw(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
result = soyaml.get(["-r", filename, "key1.child2.deep1"])
self.assertEqual(result, 0)
self.assertEqual("45\n", mock_stdout.getvalue())
def test_get_str(self): def test_get_str(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout: with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml" filename = "/tmp/so-yaml_test-get.yaml"
@@ -417,51 +406,6 @@ class TestRemove(unittest.TestCase):
self.assertEqual(result, 0) self.assertEqual(result, 0)
self.assertIn("hello\n...", mock_stdout.getvalue()) self.assertIn("hello\n...", mock_stdout.getvalue())
def test_get_str_raw(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}")
file.close()
result = soyaml.get(["-r", filename, "key1.child2.deep1"])
self.assertEqual(result, 0)
self.assertEqual("hello\n", mock_stdout.getvalue())
def test_get_bool(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
result = soyaml.get([filename, "key2"])
self.assertEqual(result, 0)
self.assertIn("false\n...", mock_stdout.getvalue())
def test_get_bool_raw(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
result = soyaml.get(["-r", filename, "key2"])
self.assertEqual(result, 0)
self.assertEqual("false\n", mock_stdout.getvalue())
def test_get_dict_raw(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
result = soyaml.get(["-r", filename, "key1"])
self.assertEqual(result, 0)
self.assertIn("child1: 123", mock_stdout.getvalue())
self.assertNotIn("...", mock_stdout.getvalue())
def test_get_list(self): def test_get_list(self):
with patch('sys.stdout', new=StringIO()) as mock_stdout: with patch('sys.stdout', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-get.yaml" filename = "/tmp/so-yaml_test-get.yaml"

File diff suppressed because it is too large Load Diff

184
salt/manager/tools/sbin/soupto3 Executable file
View File

@@ -0,0 +1,184 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
UPDATE_URL=https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/refs/heads/3/main/VERSION
# Check if already running version 3
CURRENT_VERSION=$(cat /etc/soversion 2>/dev/null)
if [[ "$CURRENT_VERSION" =~ ^3\. ]]; then
echo ""
echo "========================================================================="
echo " Already Running Security Onion 3"
echo "========================================================================="
echo ""
echo " This system is already running Security Onion $CURRENT_VERSION."
echo " Use 'soup' to update within the 3.x release line."
echo ""
exit 0
fi
echo ""
echo "Checking PCAP settings."
echo ""
# Check pcapengine setting - must be SURICATA before upgrading to version 3
PCAP_ENGINE=$(lookup_pillar "pcapengine")
PCAP_DELETED=false
prompt_delete_pcap() {
read -rp " Would you like to delete all remaining Stenographer PCAP data? (y/N): " DELETE_PCAP
if [[ "$DELETE_PCAP" =~ ^[Yy]$ ]]; then
echo ""
echo " WARNING: This will permanently delete all Stenographer PCAP data"
echo " on all nodes. This action cannot be undone."
echo ""
read -rp " Are you sure? (y/N): " CONFIRM_DELETE
if [[ "$CONFIRM_DELETE" =~ ^[Yy]$ ]]; then
echo ""
echo " Deleting Stenographer PCAP data on all nodes..."
salt '*' cmd.run "rm -rf /nsm/pcap/* && rm -rf /nsm/pcapindex/*"
echo " Done."
PCAP_DELETED=true
else
echo ""
echo " Delete cancelled."
fi
fi
}
pcapengine_not_changed() {
echo ""
echo " PCAP engine must be set to SURICATA before upgrading to Security Onion 3."
echo " You can change this in SOC by navigating to:"
echo " Configuration -> global -> pcapengine"
}
prompt_change_engine() {
local current_engine=$1
echo ""
read -rp " Would you like to change the PCAP engine to SURICATA now? (y/N): " CHANGE_ENGINE
if [[ "$CHANGE_ENGINE" =~ ^[Yy]$ ]]; then
if [[ "$PCAP_DELETED" != "true" ]]; then
echo ""
echo " WARNING: Stenographer PCAP data was not deleted. If you proceed,"
echo " this data will no longer be accessible through SOC and will never"
echo " be automatically deleted. You will need to manually remove it later."
echo ""
read -rp " Continue with changing pcapengine to SURICATA? (y/N): " CONFIRM_CHANGE
if [[ ! "$CONFIRM_CHANGE" =~ ^[Yy]$ ]]; then
pcapengine_not_changed
return 1
fi
fi
echo ""
echo " Updating PCAP engine to SURICATA..."
so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pcapengine SURICATA
echo " Done."
return 0
else
pcapengine_not_changed
return 1
fi
}
case "$PCAP_ENGINE" in
SURICATA)
echo "PCAP engine settings OK."
;;
TRANSITION|STENO)
echo ""
echo "========================================================================="
echo " PCAP Engine Check Failed"
echo "========================================================================="
echo ""
echo " Your PCAP engine is currently set to $PCAP_ENGINE."
echo ""
echo " Before upgrading to Security Onion 3, Stenographer PCAP data must be"
echo " removed and the PCAP engine must be set to SURICATA."
echo ""
echo " To check remaining Stenographer PCAP usage, run:"
echo " salt '*' cmd.run 'du -sh /nsm/pcap'"
echo ""
prompt_delete_pcap
if ! prompt_change_engine "$PCAP_ENGINE"; then
echo ""
exit 1
fi
;;
*)
echo ""
echo "========================================================================="
echo " PCAP Engine Check Failed"
echo "========================================================================="
echo ""
echo " Unable to determine the PCAP engine setting (got: '$PCAP_ENGINE')."
echo " Please ensure the PCAP engine is set to SURICATA."
echo " In SOC, navigate to Configuration -> global -> pcapengine"
echo " and change the value to SURICATA."
echo ""
exit 1
;;
esac
echo ""
echo "Checking Versions."
echo ""
# Check if Security Onion 3 has been released
VERSION=$(curl -sSf "$UPDATE_URL" 2>/dev/null)
if [[ -z "$VERSION" ]]; then
echo ""
echo "========================================================================="
echo " Unable to Check Version"
echo "========================================================================="
echo ""
echo " Could not retrieve version information from:"
echo " $UPDATE_URL"
echo ""
echo " Please check your network connection and try again."
echo ""
exit 1
fi
if [[ "$VERSION" == "UNRELEASED" ]]; then
echo ""
echo "========================================================================="
echo " Security Onion 3 Not Available"
echo "========================================================================="
echo ""
echo " Security Onion 3 has not been released yet."
echo ""
echo " Please check back later or visit https://securityonion.net for updates."
echo ""
exit 1
fi
# Validate version format (e.g., 3.0.2)
if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo ""
echo "========================================================================="
echo " Invalid Version"
echo "========================================================================="
echo ""
echo " Received unexpected version format: '$VERSION'"
echo ""
echo " Please check back later or visit https://securityonion.net for updates."
echo ""
exit 1
fi
echo "Security Onion 3 ($VERSION) is available. Upgrading..."
echo ""
# All checks passed - proceed with upgrade
BRANCH=3/main soup

View File

@@ -3,7 +3,6 @@ nginx:
external_suricata: False external_suricata: False
ssl: ssl:
replace_cert: False replace_cert: False
alt_names: []
config: config:
throttle_login_burst: 12 throttle_login_burst: 12
throttle_login_rate: 20 throttle_login_rate: 20

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'nginx/map.jinja' import NGINXMERGED %} {% from 'nginx/map.jinja' import NGINXMERGED %}
include: include:
@@ -37,11 +37,11 @@ so-nginx:
- hostname: so-nginx - hostname: so-nginx
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers[container_config].ip }} - ipv4_address: {{ DOCKER.containers[container_config].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKERMERGED.containers[container_config].extra_hosts %} {% if DOCKER.containers[container_config].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers[container_config].extra_hosts %} {% for XTRAHOST in DOCKER.containers[container_config].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -64,26 +64,20 @@ so-nginx:
- /opt/so/rules/nids/suri:/surirules:ro - /opt/so/rules/nids/suri:/surirules:ro
{% endif %} {% endif %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers[container_config].custom_bind_mounts %} {% if DOCKER.containers[container_config].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers[container_config].custom_bind_mounts %} {% for BIND in DOCKER.containers[container_config].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers[container_config].extra_env %} {% if DOCKER.containers[container_config].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers[container_config].extra_env %} {% for XTRAENV in DOCKER.containers[container_config].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers[container_config].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers[container_config].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- cap_add: NET_BIND_SERVICE - cap_add: NET_BIND_SERVICE
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers[container_config].port_bindings %} {% for BINDING in DOCKER.containers[container_config].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- watch: - watch:

View File

@@ -1,5 +1,5 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %} {%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'docker/docker.map.jinja' import DOCKERMERGED %} {%- from 'docker/docker.map.jinja' import DOCKER %}
{%- from 'nginx/map.jinja' import NGINXMERGED %} {%- from 'nginx/map.jinja' import NGINXMERGED %}
{%- set role = grains.id.split('_') | last %} {%- set role = grains.id.split('_') | last %}
{%- set influxpass = salt['pillar.get']('secrets:influx_pass') %} {%- set influxpass = salt['pillar.get']('secrets:influx_pass') %}
@@ -60,8 +60,6 @@ http {
{%- endif %} {%- endif %}
{%- if GLOBALS.is_manager %} {%- if GLOBALS.is_manager %}
{%- set all_names = [GLOBALS.hostname, GLOBALS.url_base] + NGINXMERGED.ssl.alt_names %}
{%- set full_server_name = all_names | unique | join(' ') %}
server { server {
listen 80 default_server; listen 80 default_server;
@@ -71,7 +69,7 @@ http {
server { server {
listen 8443; listen 8443;
server_name {{ full_server_name }}; server_name {{ GLOBALS.url_base }};
root /opt/socore/html; root /opt/socore/html;
location /artifacts/ { location /artifacts/ {
try_files $uri =206; try_files $uri =206;
@@ -114,7 +112,7 @@ http {
server { server {
listen 7788; listen 7788;
server_name {{ full_server_name }}; server_name {{ GLOBALS.url_base }};
root /nsm/rules; root /nsm/rules;
location / { location / {
allow all; allow all;
@@ -130,7 +128,7 @@ http {
server { server {
listen 7789 ssl; listen 7789 ssl;
http2 on; http2 on;
server_name {{ full_server_name }}; server_name {{ GLOBALS.url_base }};
root /surirules; root /surirules;
add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' 'unsafe-eval' https: data: blob: wss:; frame-ancestors 'self'"; add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' 'unsafe-eval' https: data: blob: wss:; frame-ancestors 'self'";
@@ -163,7 +161,7 @@ http {
server { server {
listen 443 ssl; listen 443 ssl;
http2 on; http2 on;
server_name {{ full_server_name }}; server_name {{ GLOBALS.url_base }};
root /opt/socore/html; root /opt/socore/html;
index index.html; index index.html;
@@ -387,13 +385,15 @@ http {
error_page 429 = @error429; error_page 429 = @error429;
location @error401 { location @error401 {
if ($request_uri ~* (^/api/.*|^/connect/.*|^/oauth2/.*)) { if ($request_uri ~* (^/connect/.*|^/oauth2/.*)) {
return 401; return 401;
} }
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; if ($request_uri ~* ^/(?!(^/api/.*))) {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
}
if ($request_uri ~* ^/(?!(login|auth|oauth2|$))) { if ($request_uri ~* ^/(?!(api/|login|auth|oauth2|$))) {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
} }
return 302 /auth/self-service/login/browser; return 302 /auth/self-service/login/browser;

View File

@@ -30,12 +30,6 @@ nginx:
advanced: True advanced: True
global: True global: True
helpLink: nginx.html helpLink: nginx.html
alt_names:
description: Provide a list of alternate names to allow remote systems the ability to refer to the SOC API as another hostname.
global: True
forcedType: '[]string'
multiline: True
helpLink: nginx.html
config: config:
throttle_login_burst: throttle_login_burst:
description: Number of login requests that can burst without triggering request throttling. Higher values allow more repeated login attempts. Values greater than zero are required in order to provide a usable login flow. description: Number of login requests that can burst without triggering request throttling. Higher values allow more repeated login attempts. Values greater than zero are required in order to provide a usable login flow.

View File

@@ -49,17 +49,6 @@ managerssl_key:
- docker_container: so-nginx - docker_container: so-nginx
# Create a cert for the reverse proxy # Create a cert for the reverse proxy
{% set san_list = [GLOBALS.hostname, GLOBALS.node_ip, GLOBALS.url_base] + NGINXMERGED.ssl.alt_names %}
{% set unique_san_list = san_list | unique %}
{% set managerssl_san_list = [] %}
{% for item in unique_san_list %}
{% if item | ipaddr %}
{% do managerssl_san_list.append("IP:" + item) %}
{% else %}
{% do managerssl_san_list.append("DNS:" + item) %}
{% endif %}
{% endfor %}
{% set managerssl_san = managerssl_san_list | join(', ') %}
managerssl_crt: managerssl_crt:
x509.certificate_managed: x509.certificate_managed:
- name: /etc/pki/managerssl.crt - name: /etc/pki/managerssl.crt
@@ -67,7 +56,7 @@ managerssl_crt:
- signing_policy: managerssl - signing_policy: managerssl
- private_key: /etc/pki/managerssl.key - private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }} - CN: {{ GLOBALS.hostname }}
- subjectAltName: {{ managerssl_san }} - subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
- days_remaining: 7 - days_remaining: 7
- days_valid: 820 - days_valid: 820
- backup: True - backup: True

View File

@@ -2,6 +2,7 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ntp/config.map.jinja' import NTPCONFIG %} {% from 'ntp/config.map.jinja' import NTPCONFIG %}
chrony_pkg: chrony_pkg:
@@ -16,7 +17,11 @@ chronyconf:
- defaults: - defaults:
NTPCONFIG: {{ NTPCONFIG }} NTPCONFIG: {{ NTPCONFIG }}
{% if GLOBALS.os_family == 'RedHat' %}
chronyd: chronyd:
{% else %}
chrony:
{% endif %}
service.running: service.running:
- enable: True - enable: True
- watch: - watch:

22
salt/pcap/ca.sls Normal file
View File

@@ -0,0 +1,22 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Creates the directory that holds the Stenographer certificates. Only runs
# when this state is permitted for the targeted minion (allowed_states map).
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states%}

# Certificate directory for Stenographer; owned by the stenographer uid (941)
# and gid 939 (socore) so both can read the certs.
stenoca:
  file.directory:
    - name: /opt/so/conf/steno/certs
    - user: 941
    - group: 939
    - makedirs: True

{% else %}

# Fail (without making changes) when this state is applied to a role that is
# not allowed to run it.
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

View File

@@ -1,59 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.is_sensor %}
delete_so-steno_so-status.conf:
file.line:
- name: /opt/so/conf/so-status/so-status.conf
- mode: delete
- match: so-steno
remove_stenographer_user:
user.absent:
- name: stenographer
- force: True
remove_stenographer_log_dir:
file.absent:
- name: /opt/so/log/stenographer
remove_stenoloss_script:
file.absent:
- name: /opt/so/conf/telegraf/scripts/stenoloss.sh
remove_steno_conf_dir:
file.absent:
- name: /opt/so/conf/steno
remove_so_pcap_export:
file.absent:
- name: /usr/sbin/so-pcap-export
remove_so_pcap_restart:
file.absent:
- name: /usr/sbin/so-pcap-restart
remove_so_pcap_start:
file.absent:
- name: /usr/sbin/so-pcap-start
remove_so_pcap_stop:
file.absent:
- name: /usr/sbin/so-pcap-stop
so-steno:
docker_container.absent:
- force: True
{% else %}
{{sls}}.non_sensor_node:
test.show_notification:
- text: "Stenographer cleanup not applicable on non-sensor nodes."
{% endif %}

View File

@@ -0,0 +1,13 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{# Builds PCAPMERGED: the shipped defaults from pcap/defaults.yaml with any
   pillar overrides merged on top. #}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'pcap/defaults.yaml' as PCAPDEFAULTS %}

{% set PCAPMERGED = salt['pillar.get']('pcap', PCAPDEFAULTS.pcap, merge=True) %}

{# disable stenographer if the pcap engine is set to SURICATA #}
{% if GLOBALS.pcap_engine == "SURICATA" %}
{% do PCAPMERGED.update({'enabled': False}) %}
{% endif %}

87
salt/pcap/config.sls Normal file
View File

@@ -0,0 +1,87 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Configures everything Stenographer needs on disk: user/group, config file,
# working directories, log directory and the pcap tool scripts under /usr/sbin.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from "pcap/config.map.jinja" import PCAPMERGED %}
{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC, STENO_BPF_COMPILED %}

# PCAP Section

# Dedicated stenographer group/user with fixed ids (941) so file ownership is
# stable across hosts and inside the container.
stenographergroup:
  group.present:
    - name: stenographer
    - gid: 941

stenographer:
  user.present:
    - uid: 941
    - gid: 941
    - home: /opt/so/conf/steno

# Config directory: stenographer-owned, gid 939 so other services can read it.
stenoconfdir:
  file.directory:
    - name: /opt/so/conf/steno
    - user: 941
    - group: 939
    - makedirs: True

# Install the so-pcap-* helper scripts shipped under salt://pcap/tools/sbin.
pcap_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://pcap/tools/sbin
    - user: 939
    - group: 939
    - file_mode: 755

{% if PCAPBPF and not PCAP_BPF_STATUS %}
# Surface a failed BPF compile as a failed (no-change) state so the operator
# sees the syntax error; the bad BPF itself is discarded upstream.
stenoPCAPbpfcompilationfailure:
  test.configurable_test_state:
    - changes: False
    - result: False
    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ PCAP_BPF_CALC['stderr'] }}"
{% endif %}

# Render the Stenographer config (salt://pcap/files/config.jinja) with the
# merged pcap settings and the compiled BPF flags.
stenoconf:
  file.managed:
    - name: /opt/so/conf/steno/config
    - source: salt://pcap/files/config.jinja
    - user: stenographer
    - group: stenographer
    - mode: 644
    - template: jinja
    - defaults:
        PCAPMERGED: {{ PCAPMERGED }}
        STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"

# Scratch, index and log directories used by the container bind mounts.
pcaptmpdir:
  file.directory:
    - name: /nsm/pcaptmp
    - user: 941
    - group: 941
    - makedirs: True

pcapindexdir:
  file.directory:
    - name: /nsm/pcapindex
    - user: 941
    - group: 941
    - makedirs: True

stenolog:
  file.directory:
    - name: /opt/so/log/stenographer
    - user: 941
    - group: 941
    - makedirs: True

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

11
salt/pcap/defaults.yaml Normal file
View File

@@ -0,0 +1,11 @@
# Default settings for the Stenographer pcap service. Overridable via pillar;
# see salt/pcap/soc_pcap.yaml for the per-setting descriptions shown in SOC.
pcap:
  # Disabled by default; config.map.jinja also forces this off when the
  # pcap engine is SURICATA.
  enabled: False
  config:
    # Max files per pcap directory (ext3 limitation; safe to raise on ext4/xfs).
    maxdirectoryfiles: 30000
    # Purge old PCAP to keep at least this percentage of disk free.
    diskfreepercentage: 10
    # Number of 1MB packet blocks used by AF_PACKET, per thread.
    blocks: 2048
    # Size (MB) pre-allocated for each individual PCAP file.
    preallocate_file_mb: 4096
    # Max number of async writes allowed at once.
    aiops: 128
    # CPU pinning; only a single CPU is currently supported.
    pin_to_cpu: False
    cpus_to_pin_to: []
    # Disk list for PCAP; currently not used.
    disks: []

27
salt/pcap/disabled.sls Normal file
View File

@@ -0,0 +1,27 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Stops the Stenographer container and comments out (rather than deletes) its
# so-status entry, so pcap.enabled can later re-enable it by uncommenting.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - pcap.sostatus

# Force-remove the container if it exists.
so-steno:
  docker_container.absent:
    - force: True

# Comment the exact 'so-steno' line in the so-status roster.
so-steno_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-steno$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

63
salt/pcap/enabled.sls Normal file
View File

@@ -0,0 +1,63 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Runs the Stenographer container. Pulls in the cert dir (pcap.ca), the
# rendered config (pcap.config) and the so-status roster entry (pcap.sostatus),
# then (re)starts so-steno and uncomments its so-status line.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}

include:
  - pcap.ca
  - pcap.config
  - pcap.sostatus

# host networking + privileged are required for raw packet capture on the
# sniffing interface; restarts whenever the rendered steno config changes.
so-steno:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-steno:{{ GLOBALS.so_version }}
    - start: True
    - network_mode: host
    - privileged: True
    - binds:
      - /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
      - /opt/so/conf/steno/config:/etc/stenographer/config:rw
      - /nsm/pcap:/nsm/pcap:rw
      - /nsm/pcapindex:/nsm/pcapindex:rw
      - /nsm/pcaptmp:/tmp:rw
      - /opt/so/log/stenographer:/var/log/stenographer:rw
{# Operator-supplied extras from the docker map (optional). #}
{% if DOCKER.containers['so-steno'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-steno'].custom_bind_mounts %}
      - {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_hosts %}
    - extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-steno'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_env %}
    - environment:
{% for XTRAENV in DOCKER.containers['so-steno'].extra_env %}
      - {{ XTRAENV }}
{% endfor %}
{% endif %}
    - watch:
      - file: stenoconf
    - require:
      - file: stenoconf

# Re-activate monitoring: uncomment the line that pcap.disabled commented out.
delete_so-steno_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-steno$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

View File

@@ -0,0 +1,11 @@
{# Stenographer configuration template, rendered by the stenoconf state in
   pcap/config.sls (template: jinja). PCAPMERGED and STENO_BPF_COMPILED are
   injected via the state's defaults; the rendered output must be valid JSON.
   NOTE(review): STENO_BPF_COMPILED appears to expand to either nothing or a
   leading-comma Flags entry — confirm against bpf/pcap.map.jinja. #}
{
  "Threads": [
    { "PacketsDirectory": "/nsm/pcap", "IndexDirectory": "/nsm/pcapindex", "MaxDirectoryFiles": {{ PCAPMERGED.config.maxdirectoryfiles }}, "DiskFreePercentage": {{ PCAPMERGED.config.diskfreepercentage }} }
  ]
  , "StenotypePath": "/usr/bin/stenotype"
  , "Interface": "{{ pillar.sensor.interface }}"
  , "Port": 1234
  , "Host": "127.0.0.1"
  , "Flags": ["-v", "--blocks={{ PCAPMERGED.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.config.preallocate_file_mb }}", "--aiops={{ PCAPMERGED.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ STENO_BPF_COMPILED }}]
  , "CertPath": "/etc/stenographer/certs"
}

41
salt/pcap/init.sls Normal file
View File

@@ -0,0 +1,41 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Entry point for pcap management: picks the enabled/disabled sub-states and
# ensures the pcap directories exist regardless of which engine is in use.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'pcap/config.map.jinja' import PCAPMERGED %}

include:
{% if PCAPMERGED.enabled and GLOBALS.role != 'so-import'%}
  - pcap.enabled
{% elif GLOBALS.role == 'so-import' %}
# Import nodes render the steno config but keep the container disabled.
  - pcap.config
  - pcap.disabled
{% else %}
  - pcap.disabled
{% endif %}

# This directory needs to exist regardless of whether STENO is enabled or not, in order for
# Sensoroni to be able to look at old steno PCAP data
# if stenographer has never run as the pcap engine no 941 user is created, so we use socore as a placeholder.
# /nsm/pcap is empty until stenographer is used as pcap engine
{% set pcap_id = 941 %}
{% set user_list = salt['user.list_users']() %}
{% if GLOBALS.pcap_engine == "SURICATA" and 'stenographer' not in user_list %}
{% set pcap_id = 939 %}
{% endif %}

pcapdir:
  file.directory:
    - name: /nsm/pcap
    - user: {{ pcap_id }}
    - group: {{ pcap_id }}
    - makedirs: True

# Destination for PCAP exports (so-pcap-export writes here); owned by 939.
pcapoutdir:
  file.directory:
    - name: /nsm/pcapout
    - user: 939
    - group: 939
    - makedirs: True

35
salt/pcap/soc_pcap.yaml Normal file
View File

@@ -0,0 +1,35 @@
# SOC UI annotations for the Stenographer pcap settings defined in
# salt/pcap/defaults.yaml; each entry supplies the description, help link and
# advanced flag shown in the SOC configuration interface.
pcap:
  enabled:
    description: Enables or disables the Stenographer packet recording process. This process may already be disabled if Suricata is being used as the packet capture process.
    helpLink: stenographer.html
  config:
    maxdirectoryfiles:
      description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting.
      helpLink: stenographer.html
    diskfreepercentage:
      # Typo fixed: "youll" -> "you'll".
      description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated Sensor nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you'll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
      helpLink: stenographer.html
    blocks:
      description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.
      advanced: True
      helpLink: stenographer.html
    preallocate_file_mb:
      description: File size to pre-allocate for individual Stenographer PCAP files. You shouldn't need to change this.
      advanced: True
      helpLink: stenographer.html
    aiops:
      description: The max number of async writes to allow for Stenographer at once.
      advanced: True
      helpLink: stenographer.html
    pin_to_cpu:
      description: Enable CPU pinning for Stenographer PCAP.
      advanced: True
      helpLink: stenographer.html
    cpus_to_pin_to:
      description: CPU to pin Stenographer PCAP to. Currently only a single CPU is supported.
      advanced: True
      helpLink: stenographer.html
    disks:
      description: List of disks to use for Stenographer PCAP. This is currently not used.
      advanced: True
      helpLink: stenographer.html

21
salt/pcap/sostatus.sls Normal file
View File

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Registers so-steno in the so-status roster so the so-status tool reports it.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

# The unless guard is an unanchored grep, so it also matches a commented
# '#so-steno' line left by pcap.disabled — a deliberately disabled entry is
# therefore not re-appended here; pcap.enabled uncomments it instead.
append_so-steno_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-steno
    - unless: grep -q so-steno /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

View File

@@ -0,0 +1,18 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Runs a Stenographer query via the so-sensoroni container and writes the
# matching packets to /nsm/pcapout/<filename>.pcap.
#   $1 - stenographer query string
#   $2 - output filename (without the .pcap extension)

if [ $# -lt 2 ]; then
  echo "Usage: $0 <steno-query> Output-Filename"
  exit 1
fi

# Quote the output path so a filename containing whitespace does not
# word-split into multiple arguments.
docker exec -t so-sensoroni scripts/stenoquery.sh "$1" -w "/nsm/pcapout/$2.pcap"

echo ""
echo "If successful, the output was written to: /nsm/pcapout/$2.pcap"

View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Restart the Stenographer (steno) service, forwarding an optional argument
# to so-restart.

. /usr/sbin/so-common

# ${1:+"$1"} passes the first argument quoted when set and non-empty, and
# passes nothing otherwise — same arity as the previous unquoted $1, but
# without word-splitting if the argument contains whitespace.
/usr/sbin/so-restart steno ${1:+"$1"}

View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Start the Stenographer (steno) service, forwarding an optional argument
# to so-start.

. /usr/sbin/so-common

# ${1:+"$1"} passes the first argument quoted when set and non-empty, and
# passes nothing otherwise — same arity as the previous unquoted $1, but
# without word-splitting if the argument contains whitespace.
/usr/sbin/so-start steno ${1:+"$1"}

View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Stop the Stenographer (steno) service, forwarding an optional argument
# to so-stop.

. /usr/sbin/so-common

# ${1:+"$1"} passes the first argument quoted when set and non-empty, and
# passes nothing otherwise — same arity as the previous unquoted $1, but
# without word-splitting if the argument contains whitespace.
/usr/sbin/so-stop steno ${1:+"$1"}

View File

@@ -0,0 +1,17 @@
# systemd unit running the Podman API service (Docker-compatible backend),
# paired with podman.socket for socket activation.
[Unit]
Description=Podman API Service
Requires=podman.socket
After=podman.socket
# NOTE(review): the relevant man page is podman-system-service(1);
# man:podman-api(1) may not resolve on current releases — confirm.
Documentation=man:podman-api(1)
StartLimitIntervalSec=0

[Service]
# NOTE(review): Type=oneshot for a long-running `podman system service`
# (no --time flag) is unusual; upstream units use an exec-style type and
# the service exits after its default inactivity timeout — confirm this
# combination is intentional before changing.
Type=oneshot
Environment=REGISTRIES_CONFIG_PATH=/etc/containers/registries.conf
ExecStart=/usr/bin/podman system service
TimeoutStopSec=30
KillMode=process

[Install]
WantedBy=multi-user.target
Also=podman.socket

View File

@@ -0,0 +1,10 @@
# Socket-activation unit for the Podman API; starting this socket lets
# systemd launch podman.service on first connection.
[Unit]
Description=Podman API Socket
Documentation=man:podman-api(1)

[Socket]
# %t expands to the runtime directory (normally /run); mode 0660 keeps the
# socket restricted to the owning user and group.
ListenStream=%t/podman/podman.sock
SocketMode=0660

[Install]
WantedBy=sockets.target

View File

@@ -0,0 +1,48 @@
{
"args": {
"podman_options": {
"isolate": "true",
"mtu": "1500"
}
},
"cniVersion": "0.4.0",
"name": "sobridge",
"plugins": [
{
"type": "bridge",
"bridge": "sobridge",
"isGateway": true,
"ipMasq": false,
"mtu": 1500,
"hairpinMode": false,
"ipam": {
"type": "host-local",
"routes": [
{
"dst": "0.0.0.0/0"
}
],
"ranges": [
[
{
"subnet": "172.17.1.0/24",
"gateway": "172.17.1.1"
}
]
]
},
"capabilities": {
"ips": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": false
}
},
{
"type": "tuning"
}
]
}

56
salt/podman/init.sls Normal file
View File

@@ -0,0 +1,56 @@
# Installs Podman plus CNI plugins, enables the Podman socket/service, and
# provides Docker-compatible shims (docker.sock symlink, docker CLI symlink)
# along with the sobridge CNI network configuration.
{% from 'docker/docker.map.jinja' import DOCKER %}
{# NOTE(review): DOCKER is only referenced inside the commented-out
   sos_docker_net block below — confirm the import is still needed. #}

Podman pkg:
  pkg.installed:
    - name: podman

cnipkg:
  pkg.installed:
    - name: containernetworking-plugins

{#
Podman service:
  file.managed:
    - name: /usr/lib/systemd/system/podman.service
    - source: salt://podman/podman.service
#}

# CNI network definition consumed by podman for the sobridge network.
sobridgeconf:
  file.managed:
    - name: /etc/cni/net.d/sobridge.conflist
    - source: salt://podman/files/sobridge.conflist

Podman_socket_service:
  service.running:
    - name: podman.socket
    - enable: true

Podman_service:
  service.running:
    - name: podman.service
    - enable: true

# Tools expecting the Docker socket path talk to Podman instead.
Docker socket:
  file.symlink:
    - name: /var/run/docker.sock
    - target: /var/run/podman/podman.sock

# `docker` CLI invocations run podman.
podman_docker_symlink:
  file.symlink:
    - name: /usr/bin/docker
    - target: /usr/bin/podman

{#
sos_docker_net:
  docker_network.present:
    - name: sobridge
    - subnet: {{ DOCKER.range }}
    - gateway: {{ DOCKER.bip }}
    - options:
        com.docker.network.bridge.name: 'sobridge'
        com.docker.network.driver.mtu: '1500'
        com.docker.network.bridge.enable_ip_masquerade: 'true'
        com.docker.network.bridge.enable_icc: 'true'
        com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
    - unless: 'docker network ls | grep sobridge'
#}

View File

@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
@@ -21,9 +21,9 @@ so-redis:
- user: socore - user: socore
- networks: - networks:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-redis'].ip }} - ipv4_address: {{ DOCKER.containers['so-redis'].ip }}
- port_bindings: - port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-redis'].port_bindings %} {% for BINDING in DOCKER.containers['so-redis'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
- binds: - binds:
@@ -34,29 +34,23 @@ so-redis:
- /etc/pki/redis.crt:/certs/redis.crt:ro - /etc/pki/redis.crt:/certs/redis.crt:ro
- /etc/pki/redis.key:/certs/redis.key:ro - /etc/pki/redis.key:/certs/redis.key:ro
- /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro - /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro
{% if DOCKERMERGED.containers['so-redis'].custom_bind_mounts %} {% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-redis'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-redis'].extra_hosts %} {% if DOCKER.containers['so-redis'].extra_hosts %}
- extra_hosts: - extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-redis'].extra_hosts %} {% for XTRAHOST in DOCKER.containers['so-redis'].extra_hosts %}
- {{ XTRAHOST }} - {{ XTRAHOST }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-redis'].extra_env %} {% if DOCKER.containers['so-redis'].extra_env %}
- environment: - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-redis'].extra_env %} {% for XTRAENV in DOCKER.containers['so-redis'].extra_env %}
- {{ XTRAENV }} - {{ XTRAENV }}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKERMERGED.containers['so-redis'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-redis'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf" - entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch: - watch:
- file: trusttheca - file: trusttheca

Some files were not shown because too many files have changed in this diff Show More