Compare commits


2 Commits

Author          SHA1        Message                     Date
DefensiveDepth  422b4bc4c9  Add local custom Playbooks  2025-09-18 12:22:20 -04:00
DefensiveDepth  6cdd88808a  Add local custom Playbooks  2025-09-18 12:07:21 -04:00
249 changed files with 2863 additions and 8882 deletions

View File

@@ -32,9 +32,6 @@ body:
   - 2.4.170
   - 2.4.180
   - 2.4.190
-  - 2.4.200
-  - 2.4.201
-  - 2.4.210
   - Other (please provide detail below)
 validations:
   required: true

View File

@@ -4,7 +4,7 @@ on:
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
-      - "salt/manager/tools/sbin/**"
+      - "salt/manager/tools/sbin"
 jobs:
   build:

View File

@@ -1,17 +1,17 @@
-### 2.4.201-20260114 ISO image released on 2026/1/15
+### 2.4.180-20250916 ISO image released on 2025/09/17

 ### Download and Verify

-2.4.201-20260114 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
+2.4.180-20250916 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso

-MD5: 20E926E433203798512EF46E590C89B9
-SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA
-SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40
+MD5: DE93880E38DE4BE45D05A41E1745CB1F
+SHA1: AEA6948911E50A4A38E8729E0E965C565402E3FC
+SHA256: C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
 ```
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
 ```
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso
+gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
 ```
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013
+gpg: Signature made Tue 16 Sep 2025 06:30:19 PM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
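For reference, the verification flow above can be run end to end as follows; this is a sketch assuming the KEYS and ISO URLs shown in this file, with the SHA256 value taken from the right-hand (2.4.180-20250916) side of the diff:
```
# Import the Security Onion signing key, then verify signature and hash.
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
gpg --import KEYS
gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
# Cross-check the SHA256 listed in the release notes:
echo "C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A  securityonion-2.4.180-20250916.iso" | sha256sum -c -
```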

View File

@@ -1 +1 @@
-2.4.210
+2.4.190

View File

@@ -1,2 +0,0 @@
ca:
server:

View File

@@ -1,6 +1,5 @@
 base:
   '*':
-    - ca
     - global.soc_global
     - global.adv_global
     - docker.soc_docker
@@ -44,6 +43,8 @@ base:
     - secrets
     - manager.soc_manager
     - manager.adv_manager
+    - idstools.soc_idstools
+    - idstools.adv_idstools
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
@@ -116,6 +117,8 @@ base:
     - elastalert.adv_elastalert
     - manager.soc_manager
     - manager.adv_manager
+    - idstools.soc_idstools
+    - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
     - kibana.soc_kibana
@@ -155,6 +158,8 @@ base:
 {% endif %}
     - secrets
     - healthcheck.standalone
+    - idstools.soc_idstools
+    - idstools.adv_idstools
    - kratos.soc_kratos
    - kratos.adv_kratos
    - hydra.soc_hydra

View File

@@ -1,91 +0,0 @@
#!/opt/saltstack/salt/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
"""
Salt execution module for hypervisor operations.
This module provides functions for managing hypervisor configurations,
including VM file management.
"""
import json
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = 'hypervisor'
def __virtual__():
"""
Only load this module if we're on a system that can manage hypervisors.
"""
return __virtualname__
def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
"""
Remove a VM entry from the hypervisorVMs file.
Args:
vms_file_path (str): Path to the hypervisorVMs file
vm_hostname (str): Hostname of the VM to remove (without role suffix)
vm_role (str): Role of the VM
Returns:
dict: Result dictionary with success status and message
CLI Example:
salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
"""
try:
# Check if file exists
if not os.path.exists(vms_file_path):
msg = f"VMs file not found: {vms_file_path}"
log.error(msg)
return {'result': False, 'comment': msg}
# Read current VMs
with open(vms_file_path, 'r') as f:
content = f.read().strip()
vms = json.loads(content) if content else []
# Find and remove the VM entry
original_count = len(vms)
vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]
if len(vms) < original_count:
# VM was found and removed, write back to file
with open(vms_file_path, 'w') as f:
json.dump(vms, f, indent=2)
# Set socore:socore ownership (939:939)
os.chown(vms_file_path, 939, 939)
msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
log.info(msg)
return {'result': True, 'comment': msg}
else:
msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
log.warning(msg)
return {'result': False, 'comment': msg}
except json.JSONDecodeError as e:
msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
log.error(msg)
return {'result': False, 'comment': msg}
except Exception as e:
msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
log.error(msg)
return {'result': False, 'comment': msg}
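For context on what this removed module managed: judging from the json.loads/json.dump calls and the hostname/role keys above, the hypervisorVMs file is a JSON array of VM entries. A sketch of the data shape plus the CLI call from the docstring (the hypervisor1VMs path and node1/nsm values are the docstring's examples, not live data):
```
# Example hypervisorVMs content the module read and rewrote:
# [
#   {"hostname": "node1", "role": "nsm"},
#   {"hostname": "node2", "role": "sensor"}
# ]
salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
```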

View File

@@ -7,14 +7,12 @@
 """
 Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
-for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
-creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
-so-kvm-modify-hardware, and so-kvm-create-volume scripts.
+for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
+It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.

-The module offers three main capabilities:
+The module offers two main capabilities:
 1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
 2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
-3. Volume Management: Create and attach virtual storage volumes for NSM data

 This module is intended to work with Security Onion's virtualization infrastructure and is typically
 used in conjunction with salt-cloud for VM provisioning and management.

@@ -246,90 +244,3 @@ def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False
     except Exception as e:
         log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
         raise
def create_volume_config(vm_name, size_gb, start=False):
'''
Usage:
salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]
Options:
vm_name
Name of the virtual machine to attach the volume to
size_gb
Volume size in GB (positive integer)
This determines the capacity of the virtual storage volume
start
Boolean flag to start the VM after volume creation
Optional - defaults to False
Examples:
1. **Create 500GB Volume:**
```bash
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
```
This creates a 500GB virtual volume for NSM storage
2. **Create 1TB Volume and Start VM:**
```bash
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
```
This creates a 1TB volume and starts the VM after attachment
Notes:
- VM must be stopped before volume creation
- Volume is created as a qcow2 image and attached to the VM
- This is an alternative to disk passthrough via modify_hardware_config
- Volume is automatically attached to the VM's libvirt configuration
- Requires so-kvm-create-volume script to be installed
- Volume files are stored in the hypervisor's VM storage directory
Description:
This function creates and attaches a virtual storage volume to a KVM virtual machine
using the so-kvm-create-volume script. It creates a qcow2 disk image of the specified
size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
This provides an alternative to physical disk passthrough, allowing flexible storage
allocation without requiring dedicated hardware. The VM can optionally be started
after the volume is successfully created and attached.
Exit Codes:
0: Success
1: Invalid parameters
2: VM state error (running when should be stopped)
3: Volume creation error
4: System command error
255: Unexpected error
Logging:
- All operations are logged to the salt minion log
- Log entries are prefixed with 'qcow2 module:'
- Volume creation and attachment operations are logged
- Errors include detailed messages and stack traces
- Final status of volume creation is logged
'''
# Validate size_gb parameter
if not isinstance(size_gb, int) or size_gb <= 0:
raise ValueError('size_gb must be a positive integer.')
cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
if start:
cmd.append('-S')
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
try:
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
ret = {
'retcode': result.returncode,
'stdout': result.stdout,
'stderr': result.stderr
}
if result.returncode != 0:
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
else:
log.info('qcow2 module: Script executed successfully.')
return ret
except Exception as e:
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
raise
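The removed function's docstring spells out its CLI surface; for the record, these invocations (taken directly from that docstring) and the underlying command the module would have built:
```
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
# Equivalent command constructed by the module (-S appended when start=True):
/usr/sbin/so-kvm-create-volume -v sensor1_sensor -s 500
```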

View File

@@ -172,15 +172,7 @@ MANAGER_HOSTNAME = socket.gethostname()
 def _download_image():
     """
-    Download and validate the Oracle Linux KVM image with retry logic and progress monitoring.
-
-    Features:
-    - Detects stalled downloads (no progress for 30 seconds)
-    - Retries up to 3 times on failure
-    - Connection timeout of 30 seconds
-    - Read timeout of 60 seconds
-    - Cleans up partial downloads on failure
+    Download and validate the Oracle Linux KVM image.

     Returns:
         bool: True if successful or file exists with valid checksum, False on error
     """
@@ -193,107 +185,45 @@ def _download_image():
             os.unlink(IMAGE_PATH)

     log.info("Starting image download process")

-    # Retry configuration
-    max_attempts = 3
-    retry_delay = 5  # seconds to wait between retry attempts
-    stall_timeout = 30  # seconds without progress before considering download stalled
-    connection_timeout = 30  # seconds to establish connection
-    read_timeout = 60  # seconds to wait for data chunks
-
-    for attempt in range(1, max_attempts + 1):
-        log.info("Download attempt %d of %d", attempt, max_attempts)
-        try:
-            # Download file with timeouts
-            log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
-            response = requests.get(
-                IMAGE_URL,
-                stream=True,
-                timeout=(connection_timeout, read_timeout)
-            )
-            response.raise_for_status()
-
-            # Get total file size for progress tracking
-            total_size = int(response.headers.get('content-length', 0))
-            downloaded_size = 0
-            last_log_time = 0
-            last_progress_time = time.time()
-            last_downloaded_size = 0
-
-            # Save file with progress logging and stall detection
-            with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
-                for chunk in response.iter_content(chunk_size=8192):
-                    if chunk:  # filter out keep-alive new chunks
-                        f.write(chunk)
-                        downloaded_size += len(chunk)
-                        current_time = time.time()
-
-                        # Check for stalled download
-                        if downloaded_size > last_downloaded_size:
-                            # Progress made, reset stall timer
-                            last_progress_time = current_time
-                            last_downloaded_size = downloaded_size
-                        elif current_time - last_progress_time > stall_timeout:
-                            # No progress for stall_timeout seconds
-                            raise Exception(
-                                f"Download stalled: no progress for {stall_timeout} seconds "
-                                f"at {downloaded_size}/{total_size} bytes"
-                            )
-
-                        # Log progress every second
-                        if current_time - last_log_time >= 1:
-                            progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
-                            log.info("Progress - %.1f%% (%d/%d bytes)",
-                                     progress, downloaded_size, total_size)
-                            last_log_time = current_time
-
-            # Validate downloaded file
-            log.info("Download complete, validating checksum...")
-            if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
-                log.error("Checksum validation failed on attempt %d", attempt)
-                os.unlink(IMAGE_PATH)
-                if attempt < max_attempts:
-                    log.info("Will retry download...")
-                    continue
-                else:
-                    log.error("All download attempts failed due to checksum mismatch")
-                    return False
-
-            log.info("Successfully downloaded and validated Oracle Linux KVM image")
-            return True
-
-        except requests.exceptions.Timeout as e:
-            log.error("Download attempt %d failed: Timeout - %s", attempt, str(e))
-            if os.path.exists(IMAGE_PATH):
-                os.unlink(IMAGE_PATH)
-            if attempt < max_attempts:
-                log.info("Will retry download in %d seconds...", retry_delay)
-                time.sleep(retry_delay)
-            else:
-                log.error("All download attempts failed due to timeout")
-
-        except requests.exceptions.RequestException as e:
-            log.error("Download attempt %d failed: Network error - %s", attempt, str(e))
-            if os.path.exists(IMAGE_PATH):
-                os.unlink(IMAGE_PATH)
-            if attempt < max_attempts:
-                log.info("Will retry download in %d seconds...", retry_delay)
-                time.sleep(retry_delay)
-            else:
-                log.error("All download attempts failed due to network errors")
-
-        except Exception as e:
-            log.error("Download attempt %d failed: %s", attempt, str(e))
-            if os.path.exists(IMAGE_PATH):
-                os.unlink(IMAGE_PATH)
-            if attempt < max_attempts:
-                log.info("Will retry download in %d seconds...", retry_delay)
-                time.sleep(retry_delay)
-            else:
-                log.error("All download attempts failed")
-
-    return False
+    try:
+        # Download file
+        log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
+        response = requests.get(IMAGE_URL, stream=True)
+        response.raise_for_status()
+
+        # Get total file size for progress tracking
+        total_size = int(response.headers.get('content-length', 0))
+        downloaded_size = 0
+        last_log_time = 0
+
+        # Save file with progress logging
+        with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
+            for chunk in response.iter_content(chunk_size=8192):
+                f.write(chunk)
+                downloaded_size += len(chunk)
+
+                # Log progress every second
+                current_time = time.time()
+                if current_time - last_log_time >= 1:
+                    progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
+                    log.info("Progress - %.1f%% (%d/%d bytes)",
+                             progress, downloaded_size, total_size)
+                    last_log_time = current_time
+
+        # Validate downloaded file
+        if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
+            os.unlink(IMAGE_PATH)
+            return False
+
+        log.info("Successfully downloaded and validated Oracle Linux KVM image")
+        return True
+
+    except Exception as e:
+        log.error("Error downloading hypervisor image: %s", str(e))
+        if os.path.exists(IMAGE_PATH):
+            os.unlink(IMAGE_PATH)
+        return False

 def _check_ssh_keys_exist():
     """
@@ -489,28 +419,25 @@ def _ensure_hypervisor_host_dir(minion_id: str = None):
         log.error(f"Error creating hypervisor host directory: {str(e)}")
         return False

-def _apply_dyanno_hypervisor_state(status):
+def _apply_dyanno_hypervisor_state():
     """
     Apply the soc.dyanno.hypervisor state on the salt master.

     This function applies the soc.dyanno.hypervisor state on the salt master
     to update the hypervisor annotation and ensure all hypervisor host directories exist.

-    Args:
-        status: Status passed to the hypervisor annotation state
-
     Returns:
         bool: True if state was applied successfully, False otherwise
     """
     try:
-        log.info(f"Applying soc.dyanno.hypervisor state on salt master with status: {status}")
+        log.info("Applying soc.dyanno.hypervisor state on salt master")

         # Initialize the LocalClient
         local = salt.client.LocalClient()

         # Target the salt master to apply the soc.dyanno.hypervisor state
         target = MANAGER_HOSTNAME + '_*'
-        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', f"pillar={{'baseDomain': {{'status': '{status}'}}}}", 'concurrent=True'], tgt_type='glob')
+        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', "pillar={'baseDomain': {'status': 'PreInit'}}", 'concurrent=True'], tgt_type='glob')
         log.debug(f"state_result: {state_result}")

         # Check if state was applied successfully
         if state_result:
@@ -527,17 +454,17 @@
                 success = False

             if success:
-                log.info(f"Successfully applied soc.dyanno.hypervisor state with status: {status}")
+                log.info("Successfully applied soc.dyanno.hypervisor state")
                 return True
             else:
-                log.error(f"Failed to apply soc.dyanno.hypervisor state with status: {status}")
+                log.error("Failed to apply soc.dyanno.hypervisor state")
                 return False
         else:
-            log.error(f"No response from salt master when applying soc.dyanno.hypervisor state with status: {status}")
+            log.error("No response from salt master when applying soc.dyanno.hypervisor state")
             return False
     except Exception as e:
-        log.error(f"Error applying soc.dyanno.hypervisor state with status: {status}: {str(e)}")
+        log.error(f"Error applying soc.dyanno.hypervisor state: {str(e)}")
         return False

 def _apply_cloud_config_state():
@@ -671,6 +598,11 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
         log.warning("Failed to apply salt.cloud.config state, continuing with setup")
         # We don't return an error here as we want to continue with the setup process

+    # Apply the soc.dyanno.hypervisor state on the salt master
+    if not _apply_dyanno_hypervisor_state():
+        log.warning("Failed to apply soc.dyanno.hypervisor state, continuing with setup")
+        # We don't return an error here as we want to continue with the setup process
+
     log.info("Starting setup_environment in setup_hypervisor runner")

     # Check if environment is already set up
@@ -684,12 +616,9 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     # Handle image setup if needed
     if not image_valid:
-        _apply_dyanno_hypervisor_state('ImageDownloadStart')
         log.info("Starting image download/validation process")
         if not _download_image():
             log.error("Image download failed")
-            # Update hypervisor annotation with failure status
-            _apply_dyanno_hypervisor_state('ImageDownloadFailed')
             return {
                 'success': False,
                 'error': 'Image download failed',
@@ -702,8 +631,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     log.info("Setting up SSH keys")
     if not _setup_ssh_keys():
         log.error("SSH key setup failed")
-        # Update hypervisor annotation with failure status
-        _apply_dyanno_hypervisor_state('SSHKeySetupFailed')
         return {
             'success': False,
             'error': 'SSH key setup failed',
@@ -728,12 +655,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     success = vm_result.get('success', False)
     log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED")

-    # Update hypervisor annotation with success status
-    if success:
-        _apply_dyanno_hypervisor_state('PreInit')
-    else:
-        _apply_dyanno_hypervisor_state('SetupFailed')
-
     # If setup was successful and we have a minion_id, run highstate
     if success and minion_id:
         log.info("Running highstate on hypervisor %s", minion_id)

View File

@@ -15,7 +15,11 @@
   'salt.minion-check',
   'sensoroni',
   'salt.lasthighstate',
-  'salt.minion',
+  'salt.minion'
+] %}
+
+{% set ssl_states = [
+  'ssl',
   'telegraf',
   'firewall',
   'schedule',
@@ -24,7 +28,7 @@
 {% set manager_states = [
   'salt.master',
-  'ca.server',
+  'ca',
   'registry',
   'manager',
   'nginx',
@@ -34,6 +38,8 @@
   'hydra',
   'elasticfleet',
   'elastic-fleet-package-registry',
+  'idstools',
+  'suricata.manager',
   'utility'
 ] %}
@@ -71,24 +77,28 @@
 {# Map role-specific states #}
 {% set role_states = {
   'so-eval': (
+    ssl_states +
     manager_states +
     sensor_states +
-    elastic_stack_states | reject('equalto', 'logstash') | list +
-    ['logstash.ssl']
+    elastic_stack_states | reject('equalto', 'logstash') | list
   ),
   'so-heavynode': (
+    ssl_states +
     sensor_states +
     ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
   ),
   'so-idh': (
+    ssl_states +
     ['idh']
   ),
   'so-import': (
+    ssl_states +
     manager_states +
     sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
-    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager']
+    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
   ),
   'so-manager': (
+    ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +
@@ -96,6 +106,7 @@
     elastic_stack_states
   ),
   'so-managerhype': (
+    ssl_states +
     manager_states +
     ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
     stig_states +
@@ -103,6 +114,7 @@
     elastic_stack_states
   ),
   'so-managersearch': (
+    ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +
@@ -110,10 +122,12 @@
     elastic_stack_states
   ),
   'so-searchnode': (
+    ssl_states +
     ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
     stig_states
   ),
   'so-standalone': (
+    ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
     sensor_states +
@@ -122,24 +136,29 @@
     elastic_stack_states
   ),
   'so-sensor': (
+    ssl_states +
     sensor_states +
     ['nginx'] +
     stig_states
   ),
   'so-fleet': (
+    ssl_states +
     stig_states +
     ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
   ),
   'so-receiver': (
+    ssl_states +
     kafka_states +
     stig_states +
     ['logstash', 'redis']
   ),
   'so-hypervisor': (
+    ssl_states +
     stig_states +
     ['hypervisor', 'libvirt']
   ),
   'so-desktop': (
+    ['ssl', 'docker_clean', 'telegraf'] +
     stig_states
   )
 } %}

View File

@@ -1,7 +1,4 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set PCAP_BPF_STATUS = 0 %}
-{% set STENO_BPF_COMPILED = "" %}
 {% if GLOBALS.pcap_engine == "TRANSITION" %}
 {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
 {% else %}
@@ -11,11 +8,3 @@
 {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
 {% set PCAPBPF = BPFMERGED.pcap %}
 {% endif %}
-{% if PCAPBPF %}
-{% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
-{% if PCAP_BPF_CALC['retcode'] == 0 %}
-{% set PCAP_BPF_STATUS = 1 %}
-{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
-{% endif %}
-{% endif %}

View File

@@ -1,11 +1,11 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to the PCAP engine.
+    description: List of BPF filters to apply to Stenographer.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
   suricata:
-    description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
+    description: List of BPF filters to apply to Suricata.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html

View File

@@ -1,16 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
-{% set SURICATA_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}
 {{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
 {% set SURICATABPF = BPFMERGED.suricata %}
-{% if SURICATABPF %}
-{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
-{% if SURICATA_BPF_CALC['retcode'] == 0 %}
-{% set SURICATA_BPF_STATUS = 1 %}
-{% endif %}
-{% endif %}

View File

@@ -1,16 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
-{% set ZEEK_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}
 {{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
 {% set ZEEKBPF = BPFMERGED.zeek %}
-{% if ZEEKBPF %}
-{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
-{% if ZEEK_BPF_CALC['retcode'] == 0 %}
-{% set ZEEK_BPF_STATUS = 1 %}
-{% endif %}
-{% endif %}

salt/ca/dirs.sls (new file, +4)
View File

@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
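This new state can be exercised on its own with the standard Salt CLI; a quick sanity check might be:
```
salt-call state.apply ca.dirs
ls -ld /etc/pki/issued_certs
```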

View File

@@ -3,10 +3,70 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 include:
-{% if GLOBALS.is_manager %}
-  - ca.server
+  - ca.dirs
+
+/etc/salt/minion.d/signing_policies.conf:
+  file.managed:
+    - source: salt://ca/files/signing_policies.conf
+
+pki_private_key:
+  x509.private_key_managed:
+    - name: /etc/pki/ca.key
+    - keysize: 4096
+    - passphrase:
+    - backup: True
+    {% if salt['file.file_exists']('/etc/pki/ca.key') -%}
+    - prereq:
+      - x509: /etc/pki/ca.crt
+    {%- endif %}
+
+pki_public_ca_crt:
+  x509.certificate_managed:
+    - name: /etc/pki/ca.crt
+    - signing_private_key: /etc/pki/ca.key
+    - CN: {{ GLOBALS.manager }}
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:true"
+    - keyUsage: "critical cRLSign, keyCertSign"
+    - extendedkeyUsage: "serverAuth, clientAuth"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid:always, issuer
+    - days_valid: 3650
+    - days_remaining: 0
+    - backup: True
+    - replace: False
+    - require:
+      - sls: ca.dirs
+    - timeout: 30
+    - retry:
+        attempts: 5
+        interval: 30
+
+mine_update_ca_crt:
+  module.run:
+    - mine.update: []
+    - onchanges:
+      - x509: pki_public_ca_crt
+
+cakeyperms:
+  file.managed:
+    - replace: False
+    - name: /etc/pki/ca.key
+    - mode: 640
+    - group: 939
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
 {% endif %}
-  - ca.trustca
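To eyeball the CA this state produces, standard openssl commands against the paths defined above should suffice (a review-time sketch, not part of the change):
```
openssl x509 -in /etc/pki/ca.crt -noout -subject -dates
# The key is mode 640, group 939 (socore), so run with sufficient privileges:
openssl rsa -in /etc/pki/ca.key -check -noout
```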

View File

@@ -1,3 +0,0 @@
{% set CA = {
'server': pillar.ca.server
}%}

View File

@@ -1,35 +1,7 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
-
-{% if setup_running%}
-
-include:
-  - ssl.remove
-
-remove_pki_private_key:
+pki_private_key:
   file.absent:
     - name: /etc/pki/ca.key

-remove_pki_public_ca_crt:
+pki_public_ca_crt:
   file.absent:
     - name: /etc/pki/ca.crt

-remove_trusttheca:
-  file.absent:
-    - name: /etc/pki/tls/certs/intca.crt
-
-remove_pki_public_ca_crt_symlink:
-  file.absent:
-    - name: /opt/so/saltstack/local/salt/ca/files/ca.crt
-
-{% else %}
-
-so-setup_not_running:
-  test.show_notification:
-    - text: "This state is reserved for usage during so-setup."
-
-{% endif %}

View File

@@ -1,63 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 7
- backup: True
- replace: False
- timeout: 30
- retry:
attempts: 5
interval: 30
pki_public_ca_crt_symlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
- target: /etc/pki/ca.crt
- require:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,26 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- docker
# Trust the CA
trusttheca:
file.managed:
- name: /etc/pki/tls/certs/intca.crt
- source: salt://ca/files/ca.crt
- watch_in:
- service: docker_running
- show_changes: False
- makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}

View File

@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}
nsm_total:
grains.present:
- name: nsm_total
- value: {{ nsm_total }}
{% else %}
nsm_missing:
test.succeed_without_changes:
- name: /nsm does not exist, skipping grain assignment
{% endif %}

View File

@@ -4,7 +4,6 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 include:
-  - common.grains
   - common.packages
 {% if GLOBALS.role in GLOBALS.manager_roles %}
   - manager.elasticsearch # needed for elastic_curl_config state
@@ -177,7 +176,7 @@ so-status_script:
     - source: salt://common/tools/sbin/so-status
     - mode: 755

-{% if GLOBALS.is_sensor %}
+{% if GLOBALS.role in GLOBALS.sensor_roles %}
 # Add sensor cleanup
 so-sensor-clean:
   cron.present:

View File

@@ -29,26 +29,9 @@ fi
 interface="$1"
 shift

-# Capture tcpdump output and exit code
-tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
-tcpdump_exit=$?
-if [ $tcpdump_exit -ne 0 ]; then
-  echo "$tcpdump_output" >&2
-  exit $tcpdump_exit
-fi
-
-# Process the output, skipping the first line
-echo "$tcpdump_output" | tail -n+2 | while read -r line; do
+tcpdump -i $interface -ddd $@ | tail -n+2 |
+while read line; do
   cols=( $line )
-  printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
+  printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
 done

-# Check if the pipeline succeeded
-if [ "${PIPESTATUS[0]}" -ne 0 ]; then
-  exit 1
-fi
-
 echo ""
-exit 0
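What the pipeline consumes: tcpdump -ddd prints an instruction count followed by one BPF instruction per line as four decimal fields (code jt jf k), and the printf packs each into 4+2+2+8 hex digits. An illustrative run (the filter and the count shown are examples, not from this change):
```
tcpdump -i lo -ddd 'tcp port 80' | head -2
# 14          <- instruction count, skipped by tail -n+2
# 40 0 0 12   <- packs as printf "%04x%02x%02x%08x" 40 0 0 12 -> 002800000000000c
```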

View File

@@ -220,22 +220,12 @@ compare_es_versions() {
 }

 copy_new_files() {
-  # Define files to exclude from deletion (relative to their respective base directories)
-  local EXCLUDE_FILES=(
-    "salt/hypervisor/soc_hypervisor.yaml"
-  )
-
-  # Build rsync exclude arguments
-  local EXCLUDE_ARGS=()
-  for file in "${EXCLUDE_FILES[@]}"; do
-    EXCLUDE_ARGS+=(--exclude="$file")
-  done
-
   # Copy new files over to the salt dir
   cd $UPDATE_DIR
-  rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
-  rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
+  rsync -a salt $DEFAULT_SALT_DIR/ --delete
+  rsync -a pillar $DEFAULT_SALT_DIR/ --delete
   chown -R socore:socore $DEFAULT_SALT_DIR/
+  chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
   cd /tmp
 }

@@ -395,7 +385,7 @@ is_manager_node() {
 }

 is_sensor_node() {
-  # Check to see if this is a sensor node
+  # Check to see if this is a sensor (forward) node
   is_single_node_grid && return 0
   grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
 }

@@ -451,7 +441,8 @@ lookup_grain() {
 lookup_role() {
   id=$(lookup_grain id)
-  echo "${id##*_}"
+  pieces=($(echo $id | tr '_' ' '))
+  echo ${pieces[1]}
 }

 is_feature_enabled() {
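The lookup_role change is worth a second look: both forms agree for ids like sensor1_sensor, but they diverge when the hostname portion itself contains an underscore. A quick shell illustration (hostnames here are made up):
```
id="my_host_sensor"
pieces=($(echo $id | tr '_' ' '))
echo ${pieces[1]}   # -> host   (second whitespace-split field)
echo "${id##*_}"    # -> sensor (everything after the last underscore)
```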
@@ -554,39 +545,21 @@ run_check_net_err() {
 }

 wait_for_salt_minion() {
   local minion="$1"
-  local max_wait="${2:-30}"
-  local interval="${3:-2}"
-  local logfile="${4:-'/dev/stdout'}"
-  local elapsed=0
-
-  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..."
-
-  while [ $elapsed -lt $max_wait ]; do
-    # Check if service is running
-    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
-    if ! systemctl is-active --quiet salt-minion; then
-      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
-      sleep $interval
-      elapsed=$((elapsed + interval))
-      continue
-    fi
-    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
-
-    # Check if minion responds to ping
-    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
-    if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
-      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
-      return 0
-    fi
-
-    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
-    sleep $interval
-    elapsed=$((elapsed + interval))
-  done
-
-  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
-  return 1
+  local timeout="${2:-5}"
+  local logfile="${3:-'/dev/stdout'}"
+  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
+  local attempt=0
+  # each attempts would take about 15 seconds
+  local maxAttempts=20
+  until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
+    attempt=$((attempt+1))
+    if [[ $attempt -eq $maxAttempts ]]; then
+      return 1
+    fi
+    sleep 10
+  done
+  return 0
 }

 salt_minion_count() {
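A compressed view of the new wait logic, with salt test.ping standing in for the check_salt_minion_status helper (defined elsewhere in this file and not shown in the hunk):
```
attempt=0
until salt "$MINION" test.ping --timeout=5 --out=json 2>/dev/null | grep -q true; do
  attempt=$((attempt+1))
  [[ $attempt -eq 20 ]] && exit 1   # maxAttempts from the hunk above
  sleep 10
done
```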

View File

@@ -25,6 +25,7 @@ container_list() {
   if [ $MANAGERCHECK == 'so-import' ]; then
     TRUSTED_CONTAINERS=(
       "so-elasticsearch"
+      "so-idstools"
       "so-influxdb"
       "so-kibana"
       "so-kratos"
@@ -48,6 +49,7 @@ container_list() {
       "so-elastic-fleet-package-registry"
       "so-elasticsearch"
       "so-idh"
+      "so-idstools"
       "so-influxdb"
       "so-kafka"
       "so-kibana"
@@ -60,6 +62,8 @@ container_list() {
       "so-soc"
       "so-steno"
       "so-strelka-backend"
+      "so-strelka-filestream"
+      "so-strelka-frontend"
       "so-strelka-manager"
       "so-suricata"
       "so-telegraf"
@@ -67,6 +71,7 @@ container_list() {
     )
   else
     TRUSTED_CONTAINERS=(
+      "so-idstools"
       "so-elasticsearch"
       "so-logstash"
       "so-nginx"

View File

@@ -129,8 +129,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
 fi

 if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -161,9 +159,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occuring BEFORE kratos index has rolled over in 2.4.210)
 fi

 if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -226,7 +222,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
 fi

 RESULT=0
@@ -273,13 +268,6 @@ for log_file in $(cat /tmp/log_check_files); do
   tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
   check_for_errors
 done

-# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
-if [[ -f /var/log/messages ]]; then
-  status "Checking log file /var/log/messages"
-  if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
-    RESULT=1
-  fi
-fi
-
 # Cleanup temp files
 rm -f /tmp/log_check_files

View File

@@ -85,7 +85,7 @@ function suricata() {
   docker run --rm \
     -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
     -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
-    -v /opt/so/rules/suricata/:/etc/suricata/rules:ro \
+    -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
     -v ${LOG_PATH}:/var/log/suricata/:rw \
     -v ${NSM_PATH}/:/nsm/:rw \
     -v "$PCAP:/input.pcap:ro" \
@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
   status "- assigning unique identifier to import: $HASH"
   pcap_data=$(pcapinfo "${PCAP}")
-  if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
+  if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
     status "- this PCAP file is invalid; skipping"
     INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
   else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
     HASHES="${HASHES} ${HASH}"
   fi

-  START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
-  END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
+  START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
+  END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
   status "- found PCAP data spanning dates $START through $END"

   # compare $START to $START_OLDEST
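The Earliest/Latest to First/Last renames appear to track the wording of capinfos output, which differs across Wireshark releases (older builds print "First packet time:" / "Last packet time:"). A version-tolerant alternative, offered only as a suggestion:
```
START=$(pcapinfo "${PCAP}" -a | grep -E "(First|Earliest) packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e | grep -E "(Last|Latest) packet time:" | awk '{print $4}')
```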

View File

@@ -3,16 +3,29 @@
 {# we only want this state to run it is CentOS #}
 {% if GLOBALS.os == 'OEL' %}

+{% set global_ca_text = [] %}
+{% set global_ca_server = [] %}
+{% set manager = GLOBALS.manager %}
+{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
+{% for host in x509dict %}
+{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
+{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
+{% do global_ca_server.append(host) %}
+{% endif %}
+{% endfor %}
+{% set trusttheca_text = global_ca_text[0] %}
+{% set ca_server = global_ca_server[0] %}
+
 trusted_ca:
-  file.managed:
+  x509.pem_managed:
     - name: /etc/pki/ca-trust/source/anchors/ca.crt
-    - source: salt://ca/files/ca.crt
+    - text: {{ trusttheca_text }}

 update_ca_certs:
   cmd.run:
     - name: update-ca-trust
     - onchanges:
-      - file: trusted_ca
+      - x509: trusted_ca

 {% else %}
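Since this state now depends on the Salt mine rather than a file served from the manager, a useful pre-flight check is to confirm the CA cert is actually published before minions render it (targets here are illustrative):
```
salt 'manager*' mine.get 'manager*' x509.get_pem_entries
```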

View File

@@ -24,6 +24,11 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
       extra_env: []
+    'so-idstools':
+      final_octet: 25
+      custom_bind_mounts: []
+      extra_hosts: []
+      extra_env: []
     'so-influxdb':
       final_octet: 26
       port_bindings:

View File

@@ -6,9 +6,9 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

-# docker service requires the ca.crt
+# include ssl since docker service requires the intca
 include:
-  - ca
+  - ssl

 dockergroup:
   group.present:
@@ -89,9 +89,10 @@ docker_running:
     - enable: True
     - watch:
      - file: docker_daemon
+      - x509: trusttheca
     - require:
       - file: docker_daemon
-      - file: trusttheca
+      - x509: trusttheca

 # Reserve OS ports for Docker proxy in case boot settings are not already applied/present

View File

@@ -41,6 +41,7 @@ docker:
       forcedType: "[]string"
     so-elastic-fleet: *dockerOptions
     so-elasticsearch: *dockerOptions
+    so-idstools: *dockerOptions
     so-influxdb: *dockerOptions
     so-kibana: *dockerOptions
     so-kratos: *dockerOptions
@@ -101,4 +102,4 @@ docker:
       multiline: True
       forcedType: "[]string"
     so-zeek: *dockerOptions
-    so-kafka: *dockerOptions
+    so-kafka: *dockerOptions

View File

@@ -60,7 +60,7 @@ so-elastalert:
     - watch:
       - file: elastaconf
     - onlyif:
-      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}
+      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}

 delete_so-elastalert_so-status.disabled:
   file.uncomment:

View File

@@ -9,7 +9,6 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}

 include:
-  - ca
   - elasticagent.config
   - elasticagent.sostatus

@@ -56,10 +55,8 @@ so-elastic-agent:
 {% endif %}
     - require:
       - file: create-elastic-agent-config
-      - file: trusttheca
     - watch:
       - file: create-elastic-agent-config
-      - file: trusttheca

 delete_so-elastic-agent_so-status.disabled:
   file.uncomment:

View File

@@ -1,34 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{# advanced config_yaml options for elasticfleet logstash output #}
{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %}
{% set ADV_OUTPUT_LOGSTASH = {} %}
{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %}
{% if v != "" and v is not none %}
{% if k == 'queue_mem_events' %}
{# rename queue_mem_events queue.mem.events #}
{% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %}
{% elif k == 'loadbalance' %}
{% if v %}
{# only include loadbalance config when its True #}
{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
{% endif %}
{% else %}
{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
{% endif %}
{% endif %}
{% endfor %}
{% set LOGSTASH_CONFIG_YAML_RAW = [] %}
{% if ADV_OUTPUT_LOGSTASH %}
{% for k, v in ADV_OUTPUT_LOGSTASH.items() %}
{% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %}
{% endfor %}
{% endif %}
{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %}

View File

@@ -95,9 +95,6 @@ soresourcesrepoclone:
     - rev: 'main'
     - depth: 1
     - force_reset: True
-    - retry:
-        attempts: 3
-        interval: 10
 {% endif %}

 elasticdefendconfdir:

View File

@@ -10,19 +10,12 @@ elasticfleet:
     grid_enrollment: ''
     defend_filters:
       enable_auto_configuration: False
-    outputs:
-      logstash:
-        bulk_max_size: ''
-        worker: ''
-        queue_mem_events: ''
-        timeout: ''
-        loadbalance: False
-        compression_level: ''
     subscription_integrations: False
     auto_upgrade_integrations: False
   logging:
     zeek:
       excluded:
-        - analyzer
         - broker
         - capture_loss
         - cluster

View File

@@ -13,11 +13,9 @@
 {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}

 include:
-  - ca
-  - logstash.ssl
-  - elasticfleet.ssl
   - elasticfleet.config
   - elasticfleet.sostatus
+  - ssl

 {% if grains.role not in ['so-fleet'] %}
 # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -34,17 +32,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
     - retry:
         attempts: 4
         interval: 30

-{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
-so-elastic-fleet-auto-configure-logstash-outputs-force:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
-    - retry:
-        attempts: 4
-        interval: 30
-    - onchanges:
-      - x509: etc_elasticfleet_logstash_crt
-      - x509: elasticfleet_kafka_crt
 {% endif %}

 # If enabled, automatically update Fleet Server URLs & ES Connection
@@ -135,11 +122,6 @@ so-elastic-fleet:
 {% endfor %}
 {% endif %}
     - watch:
-      - file: trusttheca
-      - x509: etc_elasticfleet_key
-      - x509: etc_elasticfleet_crt
-    - require:
-      - file: trusttheca
       - x509: etc_elasticfleet_key
       - x509: etc_elasticfleet_crt
 {% endif %}
@@ -153,18 +135,12 @@ so-elastic-fleet-package-statefile:
 so-elastic-fleet-package-upgrade:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-package-upgrade
-    - retry:
-        attempts: 3
-        interval: 10
     - onchanges:
       - file: /opt/so/state/elastic_fleet_packages.txt

 so-elastic-fleet-integrations:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-integration-policy-load
-    - retry:
-        attempts: 3
-        interval: 10

 so-elastic-agent-grid-upgrade:
   cmd.run:
@@ -176,11 +152,7 @@ so-elastic-agent-grid-upgrade:
 so-elastic-fleet-integration-upgrade:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-integration-upgrade
-    - retry:
-        attempts: 3
-        interval: 10

-{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
 so-elastic-fleet-addon-integrations:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-optional-integrations-load

View File

@@ -2,7 +2,7 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-zeek-logs", "name": "import-zeek-logs",
@@ -10,31 +10,19 @@
"description": "Zeek Import logs", "description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/zeek/logs/*.log" "/nsm/import/*/zeek/logs/*.log"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
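
The JavaScript processor in this policy routes each imported log to a per-log ingest pipeline. A short Python sketch of that routing logic:

```
# Derive the ingest pipeline from the imported log's filename,
# mirroring the JavaScript processor above.
import_file = "conn.log"               # dissected from log.file.path
pipeline = "zeek." + import_file[:-4]  # slice(0,-4) -> "zeek.conn"
# ICS logs (bacnet, modbus, s7comm, ...) additionally get an "ics" tag.
```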

View File

@@ -11,51 +11,36 @@
{%- endif -%} {%- endif -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/kratos/kratos.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "kratos", "data_stream.dataset": "kratos",
"pipeline": "kratos", "tags": ["so-kratos"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
{%- if valid_identities -%} {%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%} {%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%} {%- endif -%}
"tags": [ "custom": "pipeline: kratos"
"so-kratos"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }
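
A Python sketch of the processor chain above, with a hypothetical identity id/email pair; the template renders one conditional branch per real identity:

```
import json

# Hypothetical identity mapping rendered by the Jinja loop.
identities = {"3f1c9a2e-0000-0000-0000-000000000000": "analyst@example.com"}

event = {"message": '{"identity_id": "3f1c9a2e-0000-0000-0000-000000000000",'
                    ' "msg": "login succeeded"}'}
event.update(json.loads(event.pop("message")))   # decode_json_fields, target ""
if event.get("identity_id") in identities:
    event["user.name"] = identities[event["identity_id"]]
```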

View File

@@ -2,38 +2,28 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"id": "zeek-logs",
"name": "zeek-logs", "name": "zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/zeek/logs/current/*.log" "/nsm/zeek/logs/current/*.log"
], ],
"data_stream.dataset": "zeek", "data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -41,4 +31,4 @@
}, },
"force": true "force": true
} }
{%- endraw -%} {%- endraw -%}

View File

@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "9.0.2", "version": "8.18.1",
"requires_root": true "requires_root": true
}, },
"enabled": true, "enabled": true,

View File

@@ -40,7 +40,7 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/elasticsearch/*.json" "/opt/so/log/elasticsearch/*.log"
] ]
} }
}, },

View File

@@ -1,43 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs", "description": "Hydra logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/hydra/hydra.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "hydra",
"pipeline": "hydra", "tags": ["so-hydra"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"exclude_files": [ "custom": "pipeline: hydra"
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"tags": [
"so-hydra"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -45,5 +28,3 @@
}, },
"force": true "force": true
} }

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "idh-logs", "name": "idh-logs",
"namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/idh/opencanary.log" "/nsm/idh/opencanary.log"
], ],
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,46 +1,33 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/evtx/*.json" "/nsm/import/*/evtx/*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "custom": "",
"exclude_files": [ "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.5.4\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.5.4\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.5.4\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
], ]
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }
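
The processors above route imported EVTX events by winlog.channel to the matching dataset and versioned ingest pipeline. A Python sketch of that mapping (pipeline versions as shown in this policy; they track the installed integration versions):

```
# winlog.channel -> (data_stream.dataset, ingest pipeline)
routes = {
    "Microsoft-Windows-Sysmon/Operational":
        ("windows.sysmon_operational", "logs-windows.sysmon_operational-3.1.2"),
    "Application": ("system.application", "logs-system.application-2.6.1"),
    "System": ("system.system", "logs-system.system-2.6.1"),
    "Microsoft-Windows-PowerShell/Operational":
        ("windows.powershell_operational", "logs-windows.powershell_operational-3.1.2"),
}

def route(channel):
    # Everything else is treated as Security-channel data.
    return routes.get(channel, ("system.security", "logs-system.security-2.6.1"))
```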

View File

@@ -1,45 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/suricata/eve*.json" "/nsm/import/*/suricata/eve*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"ignore_older": "72h", "custom": "pipeline: suricata.common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,17 +1,18 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "rita-logs", "name": "rita-logs",
"namespace": "so",
"description": "RITA Logs", "description": "RITA Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
@@ -19,28 +20,15 @@
"/nsm/rita/exploded-dns.csv", "/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv" "/nsm/rita/long-connections.csv"
], ],
"data_stream.dataset": "rita", "exclude_files": [],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h", "ignore_older": "72h",
"clean_inactive": -1, "data_stream.dataset": "rita",
"harvester_limit": 0, "tags": [],
"fingerprint": false, "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"fingerprint_offset": 0, "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, }
"force": true
} }
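
The script processor derives the RITA pipeline name from the CSV filename, keeping only the part after the first hyphen when one exists. A Python sketch:

```
def rita_pipeline(csv_path):
    # "/nsm/rita/exploded-dns.csv" -> "rita.dns"
    # "/nsm/rita/beacons.csv"      -> "rita.beacons"
    name = csv_path.rsplit("/", 1)[-1][: -len(".csv")]
    parts = name.split("-")
    return "rita." + (parts[1] if len(parts) > 1 else parts[0])
```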

View File

@@ -1,41 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "so-ip-mappings", "name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings", "description": "IP Description mappings",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/custom-mappings/ip-descriptions.csv" "/nsm/custom-mappings/ip-descriptions.csv"
], ],
"data_stream.dataset": "hostnamemappings", "data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"tags": [ "tags": [
"so-ip-mappings" "so-ip-mappings"
], ],
"recursive_glob": true, "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"clean_inactive": -1, "custom": ""
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -43,3 +31,5 @@
}, },
"force": true "force": true
} }
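
A Python sketch of the mapping logic above: parse an "ip,description" CSV row, cancel rows whose first column is not a valid IPv4 address, and derive a stable document id from the address (sha256 here stands in for the fingerprint processor):

```
import hashlib
import re

# Same IPv4 pattern as the JavaScript validator above.
VALID_IP = re.compile(r"^((25[0-5]|2[0-4]\d|1\d{2}|[1-9]?\d)\.){3}"
                      r"(25[0-5]|2[0-4]\d|1\d{2}|[1-9]?\d)$")

def map_row(line):
    ip, description = [c.strip() for c in line.split(",", 1)]
    if not VALID_IP.match(ip):
        return None                                   # event.Cancel() equivalent
    doc_id = hashlib.sha256(ip.encode()).hexdigest()  # stable @metadata._id
    return {"_id": doc_id, "so.ip_address": ip, "so.description": description}
```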

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sync.log" "/opt/so/log/soc/sync.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync", "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"tags": [], "custom": "pipeline: common"
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,48 +1,35 @@
{ {
"policy_id": "so-grid-nodes_general",
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-detections-logs", "name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs", "description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log" "/opt/so/log/soc/detections_runtime-status_yara.log"
], ],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "tags": [
"so-soc" "so-soc"
], ],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"ignore_older": "72h", "custom": "pipeline: common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,46 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/salt-relay.log" "/opt/so/log/soc/salt-relay.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay", "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/sensoroni/sensoroni.log" "/opt/so/log/sensoroni/sensoroni.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,46 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sensoroni-server.log" "/opt/so/log/soc/sensoroni-server.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "strelka-logs", "name": "strelka-logs",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/strelka/log/strelka.log" "/nsm/strelka/log/strelka.log"
], ],
"data_stream.dataset": "strelka", "data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"clean_inactive": -1, "custom": "pipeline: strelka.file"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/suricata/eve*.json" "/nsm/suricata/eve*.json"
], ],
"data_stream.dataset": "filestream.generic", "data_stream.dataset": "suricata",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"clean_inactive": -1, "custom": "pipeline: suricata.common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }

View File

@@ -2,32 +2,26 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%} {%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{% if grains.role == 'so-heavynode' %} {%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %} {% if not AGENT_STATUS %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}
pull_agent_installer:
file.managed:
- name: /opt/so/so-elastic-agent_linux_amd64
- source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- mode: 755
- makedirs: True
{% if grains.role not in ['so-heavynode'] %}
run_installer: run_installer:
cmd.run: cmd.script:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so - cwd: /opt/so
- retry: - args: -token={{ GRIDNODETOKENGENERAL }}
attempts: 3 - retry: True
interval: 20 {% else %}
run_installer:
cmd.script:
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so
- args: -token={{ GRIDNODETOKENHEAVY }}
- retry: True
{% endif %}
cleanup_agent_installer:
file.absent:
- name: /opt/so/so-elastic-agent_linux_amd64
{% endif %} {% endif %}

View File

@@ -21,7 +21,6 @@
'azure_application_insights.app_state': 'azure.app_state', 'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing', 'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function', 'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset', 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm', 'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance', 'azure_metrics.container_instance': 'azure.container_instance',
@@ -122,9 +121,6 @@
"phases": { "phases": {
"cold": { "cold": {
"actions": { "actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0} "set_priority": {"priority": 0}
}, },
"min_age": "60d" "min_age": "60d"
@@ -141,31 +137,12 @@
"max_age": "30d", "max_age": "30d",
"max_primary_shard_size": "50gb" "max_primary_shard_size": "50gb"
}, },
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100} "set_priority": {"priority": 100}
}, },
"min_age": "0ms" "min_age": "0ms"
}, },
"warm": { "warm": {
"actions": { "actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50} "set_priority": {"priority": 50}
}, },
"min_age": "30d" "min_age": "30d"

View File

@@ -50,46 +50,6 @@ elasticfleet:
global: True global: True
forcedType: bool forcedType: bool
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
outputs:
logstash:
bulk_max_size:
description: The maximum number of events to bulk in a single Logstash request.
global: True
forcedType: int
advanced: True
helpLink: elastic-fleet.html
worker:
description: The number of workers per configured host publishing events.
global: True
forcedType: int
advanced: true
helpLink: elastic-fleet.html
queue_mem_events:
title: queued events
description: The number of events the queue can store. This value should be evenly divisible by 'bulk_max_size' to avoid sending partial batches to the output.
global: True
forcedType: int
advanced: True
helpLink: elastic-fleet.html
timeout:
description: The number of seconds to wait for responses from the Logstash server before timing out, e.g. 30s
regex: ^[0-9]+s$
advanced: True
global: True
helpLink: elastic-fleet.html
loadbalance:
description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
forcedType: bool
advanced: True
global: True
helpLink: elastic-fleet.html
compression_level:
description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
regex: ^[1-9]$
forcedType: int
advanced: True
global: True
helpLink: elastic-fleet.html
server: server:
custom_fqdn: custom_fqdn:
description: Custom FQDN for Agents to connect to. One per line. description: Custom FQDN for Agents to connect to. One per line.
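
A quick Python check of the value constraints the regexes above impose (same patterns as in the annotations):

```
import re

assert re.fullmatch(r"[0-9]+s", "30s")   # timeout, e.g. "30s"
assert re.fullmatch(r"[1-9]", "6")       # compression_level: 1 (speed) .. 9 (size)
assert not re.fullmatch(r"[1-9]", "10")  # two digits rejected
```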

View File

@@ -1,186 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_agent_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetagentkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ CA.server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 947
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 947
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -23,13 +23,6 @@ fi
# Define a banner to separate sections # Define a banner to separate sections
banner="=========================================================================" banner="========================================================================="
fleet_api() {
local QUERYPATH=$1
shift
curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
elastic_fleet_integration_check() { elastic_fleet_integration_check() {
AGENT_POLICY=$1 AGENT_POLICY=$1
@@ -46,9 +39,7 @@ elastic_fleet_integration_create() {
JSON_STRING=$1 JSON_STRING=$1
if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
@@ -65,10 +56,7 @@ elastic_fleet_integration_remove() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
return 1
fi
} }
elastic_fleet_integration_update() { elastic_fleet_integration_update() {
@@ -77,9 +65,7 @@ elastic_fleet_integration_update() {
JSON_STRING=$2 JSON_STRING=$2
if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_integration_policy_upgrade() { elastic_fleet_integration_policy_upgrade() {
@@ -91,83 +77,78 @@ elastic_fleet_integration_policy_upgrade() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_package_version_check() { elastic_fleet_package_version_check() {
PACKAGE=$1 PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
if output=$(fleet_api "epm/packages/$PACKAGE"); then
echo "$output" | jq -r '.item.version'
else
echo "Error: Failed to get current package version for '$PACKAGE'"
return 1
fi
} }
elastic_fleet_package_latest_version_check() { elastic_fleet_package_latest_version_check() {
PACKAGE=$1 PACKAGE=$1
if output=$(fleet_api "epm/packages/$PACKAGE"); then if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" --fail); then
if version=$(jq -e -r '.item.latestVersion' <<< $output); then if version=$(jq -e -r '.item.latestVersion' <<< $output); then
echo "$version" echo "$version"
fi fi
else else
echo "Error: Failed to get latest version for '$PACKAGE'" echo "Error: Failed to get latest version for $PACKAGE"
return 1
fi fi
} }
elastic_fleet_package_install() { elastic_fleet_package_install() {
PKG=$1 PKG=$1
VERSION=$2 VERSION=$2
if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
return 1
fi
} }
elastic_fleet_bulk_package_install() { elastic_fleet_bulk_package_install() {
BULK_PKG_LIST=$1 BULK_PKG_LIST=$1
if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
return 1 }
fi
elastic_fleet_package_is_installed() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
} }
elastic_fleet_installed_packages() { elastic_fleet_installed_packages() {
if ! fleet_api "epm/packages/installed?perPage=500"; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
return 1
fi
} }
elastic_fleet_agent_policy_ids() { elastic_fleet_agent_policy_ids() {
if output=$(fleet_api "agent_policies"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
echo "$output" | jq -r .items[].id if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve agent policies." echo "Error: Failed to retrieve agent policies."
return 1 exit 1
fi
}
elastic_fleet_agent_policy_names() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi fi
} }
elastic_fleet_integration_policy_names() { elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1 AGENT_POLICY=$1
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
echo "$output" | jq -r .item.package_policies[].name if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'." echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
elastic_fleet_integration_policy_package_name() { elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name' if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'." echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
@@ -175,32 +156,32 @@ elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" --fail); then
if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< $output); then
echo "$version" echo "$version"
fi fi
else else
echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'" echo "Error: Failed to retrieve agent policy $AGENT_POLICY"
return 1 exit 1
fi fi
} }
elastic_fleet_integration_id() { elastic_fleet_integration_id() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id' if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'." echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
elastic_fleet_integration_policy_dryrun_upgrade() { elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1 INTEGRATION_ID=$1
if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
if [ $? -ne 0 ]; then
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'." echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
return 1 exit 1
fi fi
} }
@@ -209,18 +190,25 @@ elastic_fleet_policy_create() {
NAME=$1 NAME=$1
DESC=$2 DESC=$2
FLEETSERVER=$3 FLEETSERVER=$3
TIMEOUT=$4 TIMEOUT=$4
JSON_STRING=$( jq -n \ JSON_STRING=$( jq -n \
--arg NAME "$NAME" \ --arg NAME "$NAME" \
--arg DESC "$DESC" \ --arg DESC "$DESC" \
--arg TIMEOUT $TIMEOUT \ --arg TIMEOUT $TIMEOUT \
--arg FLEETSERVER "$FLEETSERVER" \ --arg FLEETSERVER "$FLEETSERVER" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}' '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
) )
# Create Fleet Policy # Create Fleet Policy
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_policy_update() {
POLICYID=$1
JSON_STRING=$2
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
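
Reviewer note: the refactor routes every helper above through a shared `fleet_api` wrapper whose definition sits outside this hunk. A minimal sketch of what it plausibly looks like, inferred from the call sites (API path first, remaining arguments passed through to curl); the exact flags are an assumption:

```
# Hypothetical reconstruction of the fleet_api helper (not shown in this diff).
# First argument is the Fleet API path under /api/fleet/; the rest go to curl.
fleet_api() {
    local ENDPOINT=$1
    shift
    # --fail returns non-zero on HTTP errors, which is what lets callers
    # write: if ! fleet_api "..."; then ... fi
    curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" \
        -L --fail "localhost:5601/api/fleet/$ENDPOINT" "$@"
}
```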

View File

@@ -8,7 +8,6 @@
. /usr/sbin/so-elastic-fleet-common . /usr/sbin/so-elastic-fleet-common
ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy # Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do do
@@ -16,20 +15,9 @@ do
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n" printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
ERROR=true
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
ERROR=true
continue
fi
fi fi
done done
if [[ "$ERROR" == "true" ]]; then
exit 1
fi
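
The explicit if/then error handling added above keeps the loop going on a per-integration failure and defers the non-zero exit to the end so Salt can retry the whole state. An equivalent, more compact sketch of the same pattern (not what the patch uses):

```
ERROR=false
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json; do
    elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"  # sets INTEGRATION_ID / NAME
    if [ -n "$INTEGRATION_ID" ]; then
        elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID" || { ERROR=true; continue; }
    else
        elastic_fleet_integration_create "@$INTEGRATION" || { ERROR=true; continue; }
    fi
done
# One consolidated failure exit lets salt retry the state on the next highstate
if [ "$ERROR" = "true" ]; then
    exit 1
fi
```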

View File

@@ -25,9 +25,5 @@ for POLICYNAME in $POLICY; do
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json) .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON # Now update the integration policy using the modified JSON
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
# exit 1 on failure to update fleet integration policies, let salt handle retries
echo "Failed to update $POLICYNAME.."
exit 1
fi
done done

View File

@@ -13,10 +13,11 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
/usr/sbin/so-elastic-fleet-package-upgrade /usr/sbin/so-elastic-fleet-package-upgrade
# Second, update Fleet Server policies # Second, update Fleet Server policies
/usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration separately # Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints # Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do do
@@ -24,18 +25,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
@@ -46,18 +39,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then if [[ "$RETURN_CODE" != "1" ]]; then
@@ -71,19 +56,11 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
fi fi
done done
@@ -100,19 +77,11 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION" elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
fi fi
fi fi

View File

@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")

View File

@@ -26,7 +26,7 @@ function update_es_urls() {
} }
# Get current list of Fleet Elasticsearch URLs # Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
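
Both hunks above harden the initial query the same way: retry, fail on HTTP errors, then sanity-check the payload by pulling a field that must exist before trusting it. A standalone sketch of that pattern (endpoint and field names taken from the hunk above):

```
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config \
    'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' \
    --retry 3 --retry-delay 30 --fail 2>/dev/null)
# Bail out early if the response is empty or missing the expected field
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ -z "$CHECKSUM" ] || [ "$CHECKSUM" = "null" ]; then
    echo "Failed to query for current Fleet outputs - bailing"
    exit 1
fi
```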

View File

@@ -24,18 +24,12 @@ fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %}) default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
ERROR=false
for AGENT_POLICY in $agent_policies; do for AGENT_POLICY in $agent_policies; do
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
# this script upgrades default integration packages; exit 1 and let salt handle retrying
exit 1
fi
for INTEGRATION in $integrations; do for INTEGRATION in $integrations; do
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
# Get package name so we know what package to look for when checking the current and latest available version # Get package name so we know what package to look for when checking the current and latest available version
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
exit 1
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %} {%- if not AUTO_UPGRADE_INTEGRATIONS %}
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %} {%- endif %}
@@ -54,9 +48,7 @@ for AGENT_POLICY in $agent_policies; do
fi fi
# Get integration ID # Get integration ID
if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
exit 1
fi
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
# Dry run of the upgrade # Dry run of the upgrade
@@ -64,23 +56,20 @@ for AGENT_POLICY in $agent_policies; do
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..." echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
echo "Upgrading $INTEGRATION..." echo "Upgrading $INTEGRATION..."
echo "Starting dry run..." echo "Starting dry run..."
if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
exit 1
fi
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors) DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
# If no errors with dry run, proceed with actual upgrade # If no errors with dry run, proceed with actual upgrade
if [[ "$DRYRUN_ERRORS" == "false" ]]; then if [[ "$DRYRUN_ERRORS" == "false" ]]; then
echo "No errors detected. Proceeding with upgrade..." echo "No errors detected. Proceeding with upgrade..."
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
if [ $? -ne 0 ]; then
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'." echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
ERROR=true exit 1
continue
fi fi
else else
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..." echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
ERROR=true exit 1
continue
fi fi
fi fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %} {%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -89,7 +78,4 @@ for AGENT_POLICY in $agent_policies; do
fi fi
done done
done done
if [[ "$ERROR" == "true" ]]; then
exit 1
fi
echo echo
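
Condensed, the upgrade gate this script now implements per integration looks like the sketch below: a dry run first, then the real upgrade only when `hasErrors` is false, with failures accumulated instead of aborting the whole run.

```
if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
    exit 1
fi
DRYRUN_ERRORS=$(jq '.[].hasErrors' <<< "$DRYRUN_OUTPUT")
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
    # Dry run clean - perform the actual policy upgrade
    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID" || ERROR=true
else
    # Dry run reported problems - record and move to the next integration
    ERROR=true
fi
```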

View File

@@ -62,17 +62,9 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.l
in_use_integrations=() in_use_integrations=()
for AGENT_POLICY in $agent_policies; do for AGENT_POLICY in $agent_policies; do
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
# skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
echo "Skipping $AGENT_POLICY.. "
continue
fi
for INTEGRATION in $integrations; do for INTEGRATION in $integrations; do
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
echo "Not adding $INTEGRATION, couldn't get package name"
continue
fi
# non-default integrations that are in-use in any policy # non-default integrations that are in-use in any policy
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
in_use_integrations+=("$PACKAGE_NAME") in_use_integrations+=("$PACKAGE_NAME")
@@ -86,7 +78,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list) latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do while read -r package; do
# get package details # get package details
@@ -168,11 +160,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
for file in "${pkg_filename}_"*.json; do for file in "${pkg_filename}_"*.json; do
[ -e "$file" ] || continue [ -e "$file" ] || continue
if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT
# integrations loaded by this script are non-essential and shouldn't cause an exit; skip them for now and the next highstate run can retry
echo "Failed to complete a chunk of bulk package installs -- $file "
continue
fi
done done
# cleanup any temp files for chunked package install # cleanup any temp files for chunked package install
rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
@@ -180,9 +168,8 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
echo "Elastic integrations don't appear to need installation/updating..." echo "Elastic integrations don't appear to need installation/updating..."
fi fi
# Write out file for generating index/component/ilm templates # Write out file for generating index/component/ilm templates
if latest_installed_package_list=$(elastic_fleet_installed_packages); then latest_installed_package_list=$(elastic_fleet_installed_packages)
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
fi
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
# Refresh installed component template list # Refresh installed component template list
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.') latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
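
For reference, the jq reshape used for `$PACKAGE_COMPONENTS` can be tested standalone; the sample payload below is made up to match the field names the filter expects:

```
# Fabricated sample matching the shape the filter expects
cat > /tmp/installed_sample.json <<'EOF'
{"items":[{"name":"endpoint","dataStreams":["logs-endpoint.events.process-*"]}]}
EOF
# Emits: [{"name":"endpoint","es_index_patterns":["logs-endpoint.events.process-*"]}]
jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' /tmp/installed_sample.json
```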

View File

@@ -3,36 +3,11 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{%- from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} {% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %}
. /usr/sbin/so-common . /usr/sbin/so-common
FORCE_UPDATE=false
UPDATE_CERTS=false
LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}"
LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar"
while [[ $# -gt 0 ]]; do
case $1 in
-f|--force)
FORCE_UPDATE=true
shift
;;
-c| --certs)
UPDATE_CERTS=true
FORCE_UPDATE=true
shift
;;
*)
echo "Unknown option $1"
echo "Usage: $0 [-f|--force] [-c|--certs]"
exit 1
;;
esac
done
# Only run on Managers # Only run on Managers
if ! is_manager_node; then if ! is_manager_node; then
printf "Not a Manager Node... Exiting" printf "Not a Manager Node... Exiting"
@@ -40,53 +15,8 @@ if ! is_manager_node; then
fi fi
function update_logstash_outputs() { function update_logstash_outputs() {
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then # Generate updated JSON payload
SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl') JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
# Revert escaped \\n to \n for jq
LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML")
if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Reuse existing secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--argjson SECRETS "$SECRETS" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else
# Update certs, creating new secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
fi
else
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Reuse existing ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}')
else
# Update ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
fi
fi
fi
# Update Logstash Outputs # Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
@@ -95,42 +25,19 @@ function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl') SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
if [[ "$UPDATE_CERTS" != "true" ]]; then # Update policy when fleet has secrets enabled
# Update policy when fleet has secrets enabled JSON_STRING=$(jq -n \
JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg UPDATEDLIST "$NEW_LIST_JSON" \ --argjson SSL_CONFIG "$SSL_CONFIG" \
--argjson SSL_CONFIG "$SSL_CONFIG" \ --argjson SECRETS "$SECRETS" \
--argjson SECRETS "$SECRETS" \ '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else
# Update certs, creating new secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKACA "$KAFKACA" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}')
fi
else else
if [[ "$UPDATE_CERTS" != "true" ]]; then # Update policy when fleet has secrets disabled or policy hasn't been force updated
# Update policy when fleet has secrets disabled or policy hasn't been force updated JSON_STRING=$(jq -n \
JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg UPDATEDLIST "$NEW_LIST_JSON" \ --argjson SSL_CONFIG "$SSL_CONFIG" \
--argjson SSL_CONFIG "$SSL_CONFIG" \ '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
else
# Update ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKACA "$KAFKACA" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}')
fi
fi fi
# Update Kafka outputs # Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
@@ -142,7 +49,7 @@ function update_kafka_outputs() {
{% if GLOBALS.pipeline == "KAFKA" %} {% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs # Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -153,7 +60,7 @@ function update_kafka_outputs() {
# Get the current list of kafka outputs & hash them # Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=() declare -a NEW_LIST=()
@@ -168,7 +75,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %} {% else %}
# Get current list of Logstash Outputs # Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -176,19 +83,10 @@ function update_kafka_outputs() {
printf "Failed to query for current Logstash Outputs..." printf "Failed to query for current Logstash Outputs..."
exit 1 exit 1
fi fi
# logstash adv config - compare pillar to last state file value
if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then
PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE")
if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then
echo "Logstash pillar config has changed - forcing update"
FORCE_UPDATE=true
fi
echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE"
fi
# Get the current list of Logstash outputs & hash them # Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=() declare -a NEW_LIST=()
@@ -237,10 +135,10 @@ function update_kafka_outputs() {
# Sort & hash the new list of Logstash Outputs # Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}') NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
# Compare the current & new list of outputs - if different, update the Logstash outputs # Compare the current & new list of outputs - if different, update the Logstash outputs
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n" printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

View File

@@ -10,16 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..." echo "Setting up {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# packages loaded by this script should never fail to install and are REQUIRED before an installation of SO can be considered successful
echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
exit 1
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo

View File

@@ -10,15 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..." echo "Upgrading {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# exit 1 on failure to upgrade a default package, allow salt to handle retries
echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo
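
Since {{ PACKAGE }} is expanded at render time, each supported package gets its own copy of this block. A sketch of what one rendered iteration looks like (package name illustrative):

```
echo "Upgrading endpoint package..."
if VERSION=$(elastic_fleet_package_latest_version_check "endpoint"); then
    if ! elastic_fleet_package_install "endpoint" "$VERSION"; then
        # exit 1 on failure to upgrade a default package, allow salt to handle retries
        echo -e "\nERROR: Failed to upgrade endpoint to version: $VERSION"
        exit 1
    fi
else
    echo -e "\nERROR: Failed to get version information for integration endpoint"
fi
echo
```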

View File

@@ -23,17 +23,18 @@ if [[ "$RETURN_CODE" != "0" ]]; then
exit 1 exit 1
fi fi
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest) ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in "${ALIASES[@]}"; do for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias # Get all concrete indices from alias
if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
# Delete all resolved indices # Delete all resolved indices
for INDX in ${INDXS}; do for INDX in ${INDXS}
do
status "Deleting $INDX" status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done done
fi
done done
# Restarting Kibana... # Restarting Kibana...
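
The rewritten loop above switches from a whitespace-split string to a proper bash array and guards the `_resolve` call with `--fail`, so a missing alias is skipped instead of feeding garbage to jq. A standalone sketch for a single alias (alias name illustrative):

```
ALIAS=".fleet-agents"
if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -k -L \
      -H "Content-Type: application/json" \
      "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
    # Delete every concrete index backing the alias
    for INDX in $(jq -r '.aliases[].indices[]' <<< "$INDXS_RAW"); do
        curl -K /opt/so/conf/kibana/curl.config -s -k -L -XDELETE \
            -H "Content-Type: application/json" "https://localhost:9200/${INDX}"
    done
fi
```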
@@ -50,61 +51,22 @@ if [[ "$RETURN_CODE" != "0" ]]; then
fi fi
printf "\n### Create ES Token ###\n" printf "\n### Create ES Token ###\n"
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
else
echo -e "\nFailed to create ES token..."
exit 1
fi
### Create Outputs, Fleet Policy and Fleet URLs ### ### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output # Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n" printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]') ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$(jq -n \ JSON_STRING=$( jq -n \
--arg ESCACRT "$ESCACRT" \ --arg ESCACRT "$ESCACRT" \
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}') '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
echo -e "\nFailed to create so-elasticsearch_manager policy..."
exit 1
fi
printf "\n\n" printf "\n\n"
# so-manager_elasticsearch should exist and be disabled. Now update it before verifying it is the only default policy
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
echo -e "\n failed to update so-manager_elasticsearch"
exit 1
fi
# At this point there should only be two policies. fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"
# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-manager_elasticsearch are set as the active default output for logs/metrics, resulting in logs not ingesting on import/eval nodes
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
# Check that fleet-default-output isn't configured as a default for anything ( both variables return false )
if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
else
echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
exit 1
fi
else
# fleet-output-policy is created automatically by fleet when started. Should always exist on any installation type
echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
exit 1
fi
# Create the Manager Fleet Server Host Agent Policy # Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output # This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n" printf "Create Manager Fleet Server Policy...\n"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
echo -e "\n Failed to create Manager fleet server policy..."
exit 1
fi
# Modify the default integration policy to update the policy_id with the correct naming # Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" ' UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
@@ -112,10 +74,7 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json) .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Add the Fleet Server Integration to the new Fleet Policy # Add the Fleet Server Integration to the new Fleet Policy
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
echo -e "\nFailed to create Fleet server integration for Manager.."
exit 1
fi
# Now we can create the Logstash Output and set it to to be the default Output # Now we can create the Logstash Output and set it to to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n" printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
@@ -127,12 +86,9 @@ JSON_STRING=$( jq -n \
--arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCA "$LOGSTASHCA" \ --arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}' '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
) )
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to create logstash fleet output"
exit 1
fi
printf "\n\n" printf "\n\n"
{%- endif %} {%- endif %}
@@ -150,10 +106,7 @@ else
fi fi
## This array replaces whatever URLs are currently configured ## This array replaces whatever URLs are currently configured
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to add manager fleet URL"
exit 1
fi
printf "\n\n" printf "\n\n"
### Create Policies & Associated Integration Configuration ### ### Create Policies & Associated Integration Configuration ###
@@ -164,22 +117,13 @@ printf "\n\n"
/usr/sbin/so-elasticsearch-templates-load /usr/sbin/so-elasticsearch-templates-load
# Initial Endpoints Policy # Initial Endpoints Policy
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
echo -e "\nFailed to create endpoints-initial policy..."
exit 1
fi
# Grid Nodes - General Policy # Grid Nodes - General Policy
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
echo -e "\nFailed to create so-grid-nodes_general policy..."
exit 1
fi
# Grid Nodes - Heavy Node Policy # Grid Nodes - Heavy Node Policy
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
echo -e "\nFailed to create so-grid-nodes_heavy policy..."
exit 1
fi
# Load Integrations for default policies # Load Integrations for default policies
so-elastic-fleet-integration-policy-load so-elastic-fleet-integration-policy-load
@@ -191,34 +135,14 @@ JSON_STRING=$( jq -n \
'{"name":$NAME,"host":$URL,"is_default":true}' '{"name":$NAME,"host":$URL,"is_default":true}'
) )
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to update Elastic Agent artifact URL"
exit 1
fi
### Finalization ### ### Finalization ###
# Query for Enrollment Tokens for default policies # Query for Enrollment Tokens for default policies
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key') GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
echo -e "\nFailed to query for Endpoints enrollment token"
exit 1
fi
if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else
echo -e "\nFailed to query for Grid nodes - General enrollment token"
exit 1
fi
if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
exit 1
fi
# Store needed data in minion pillar # Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
@@ -241,11 +165,9 @@ printf '%s\n'\
"" >> "$global_pillar_file" "" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State # Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node # Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0 exit 0
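
A recurring building block in this setup script is `jq -n --arg`, which builds JSON payloads from shell variables without quoting pitfalls. A minimal standalone example (values illustrative):

```
NAME="grid-artifacts"
URL="https://manager.example:8443/artifacts/"
JSON_STRING=$(jq -n \
    --arg NAME "$NAME" \
    --arg URL "$URL" \
    '{"name":$NAME,"host":$URL,"is_default":true}')
# {"name":"grid-artifacts","host":"https://manager.example:8443/artifacts/","is_default":true}
echo "$JSON_STRING"
```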

View File

@@ -23,7 +23,7 @@ function update_fleet_urls() {
} }
# Get current list of Fleet Server URLs # Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

View File

@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg KAFKACA "$KAFKACA" \ --arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\ --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \ --argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"

View File

@@ -26,14 +26,14 @@ catrustscript:
GLOBALS: {{ GLOBALS }} GLOBALS: {{ GLOBALS }}
{% endif %} {% endif %}
elasticsearch_cacerts: cacertz:
file.managed: file.managed:
- name: /opt/so/conf/ca/cacerts - name: /opt/so/conf/ca/cacerts
- source: salt://elasticsearch/cacerts - source: salt://elasticsearch/cacerts
- user: 939 - user: 939
- group: 939 - group: 939
elasticsearch_capems: capemz:
file.managed: file.managed:
- name: /opt/so/conf/ca/tls-ca-bundle.pem - name: /opt/so/conf/ca/tls-ca-bundle.pem
- source: salt://elasticsearch/tls-ca-bundle.pem - source: salt://elasticsearch/tls-ca-bundle.pem

View File

@@ -5,6 +5,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
include:
- ssl
- elasticsearch.ca
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

View File

@@ -1,6 +1,6 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 9.0.8 version: 8.18.6
index_clean: true index_clean: true
config: config:
action: action:
@@ -72,8 +72,6 @@ elasticsearch:
actions: actions:
set_priority: set_priority:
priority: 0 priority: 0
allocate:
number_of_replicas: ""
min_age: 60d min_age: 60d
delete: delete:
actions: actions:
@@ -86,25 +84,11 @@ elasticsearch:
max_primary_shard_size: 50gb max_primary_shard_size: 50gb
set_priority: set_priority:
priority: 100 priority: 100
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 0ms min_age: 0ms
warm: warm:
actions: actions:
set_priority: set_priority:
priority: 50 priority: 50
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
allocate:
number_of_replicas: ""
min_age: 30d min_age: 30d
so-case: so-case:
index_sorting: false index_sorting: false
@@ -261,6 +245,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-detection: so-detection:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -299,19 +284,6 @@ elasticsearch:
hot: hot:
actions: {} actions: {}
min_age: 0ms min_age: 0ms
sos-backup:
index_sorting: false
index_template:
composed_of: []
ignore_missing_component_templates: []
index_patterns:
- sos-backup-*
priority: 501
template:
settings:
index:
number_of_replicas: 0
number_of_shards: 1
so-assistant-chat: so-assistant-chat:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -612,6 +584,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-import: so-import:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -857,11 +830,53 @@ elasticsearch:
composed_of: composed_of:
- agent-mappings - agent-mappings
- dtc-agent-mappings - dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- container-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings
- dtc-event-mappings
- file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings - host-mappings
- dtc-host-mappings - dtc-host-mappings
- http-mappings - http-mappings
- dtc-http-mappings - dtc-http-mappings
- log-mappings
- metadata-mappings - metadata-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- threat-mappings
- tls-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
data_stream: data_stream:
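
Every name listed under composed_of must already exist as a component template when the index template loads; a quick spot-check against one of the entries above, assuming local API access via the same curl config:

```
# Spot-check that a referenced component template exists.
curl -sK /opt/so/conf/elasticsearch/curl.config \
  "localhost:9200/_component_template/agent-mappings" | head -c 500; echo
```
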
@@ -917,6 +932,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-hydra: so-hydra:
close: 30 close: 30
delete: 365 delete: 365
@@ -1027,6 +1043,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-lists: so-lists:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -1110,8 +1127,6 @@ elasticsearch:
actions: actions:
set_priority: set_priority:
priority: 0 priority: 0
allocate:
number_of_replicas: ""
min_age: 60d min_age: 60d
delete: delete:
actions: actions:
@@ -1124,25 +1139,11 @@ elasticsearch:
max_primary_shard_size: 50gb max_primary_shard_size: 50gb
set_priority: set_priority:
priority: 100 priority: 100
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 0ms min_age: 0ms
warm: warm:
actions: actions:
set_priority: set_priority:
priority: 50 priority: 50
allocate:
number_of_replicas: ""
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 30d min_age: 30d
so-logs-detections_x_alerts: so-logs-detections_x_alerts:
index_sorting: false index_sorting: false
@@ -1990,70 +1991,6 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-elasticsearch_x_server:
index_sorting: false
index_template:
composed_of:
- logs-elasticsearch.server@package
- logs-elasticsearch.server@custom
- so-fleet_integrations.ip_mappings-1
- so-fleet_globals-1
- so-fleet_agent_id_verification-1
data_stream:
allow_custom_routing: false
hidden: false
ignore_missing_component_templates:
- logs-elasticsearch.server@custom
index_patterns:
- logs-elasticsearch.server-*
priority: 501
template:
mappings:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
settings:
index:
lifecycle:
name: so-logs-elasticsearch.server-logs
mapping:
total_fields:
limit: 5000
number_of_replicas: 0
sort:
field: '@timestamp'
order: desc
policy:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 60d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-endpoint_x_actions: so-logs-endpoint_x_actions:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -3122,6 +3059,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-logs-system_x_application: so-logs-system_x_application:
index_sorting: false index_sorting: false
index_template: index_template:


@@ -14,9 +14,6 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include: include:
- ca
- elasticsearch.ca
- elasticsearch.ssl
- elasticsearch.config - elasticsearch.config
- elasticsearch.sostatus - elasticsearch.sostatus
@@ -64,7 +61,11 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
@@ -81,21 +82,22 @@ so-elasticsearch:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca - file: cacertz
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- require: - require:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- file: eslog4jfile - file: eslog4jfile
- file: nsmesdir - file: nsmesdir
- file: eslogdir - file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms - file: elasticp12perms
{% if GLOBALS.is_manager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- cmd: auth_users_roles_inode - cmd: auth_users_roles_inode
- cmd: auth_users_inode - cmd: auth_users_inode
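
Which branch of the is_manager conditional took effect can be confirmed on a running node by listing the container's bind mounts:

```
# Show which host CA file is mounted into the container as config/ca.crt.
docker inspect so-elasticsearch \
  --format '{{ range .Mounts }}{{ .Source }} -> {{ .Destination }}{{ "\n" }}{{ end }}' \
  | grep 'config/ca.crt'
```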


@@ -23,7 +23,6 @@
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}}, {"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}}, {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } } { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }


@@ -1,90 +1,9 @@
{ {
"description": "kratos", "description" : "kratos",
"processors": [ "processors" : [
{ {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
"set": { {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
"field": "audience", {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
"value": "access", { "pipeline": { "name": "common" } }
"override": false, ]
"ignore_failure": true
}
},
{
"set": {
"field": "event.dataset",
"ignore_empty_value": true,
"ignore_failure": true,
"value": "kratos.{{{audience}}}",
"media_type": "text/plain"
}
},
{
"set": {
"field": "event.action",
"ignore_failure": true,
"copy_from": "msg"
}
},
{
"rename": {
"field": "http_request",
"target_field": "http.request",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http_response",
"target_field": "http.response",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.path",
"target_field": "http.uri",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.query",
"target_field": "http.query",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.headers.user-agent",
"target_field": "http.useragent",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "common"
}
}
]
} }
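
The compacted kratos pipeline defaults audience to "access", derives event.dataset from it, and copies msg into event.action. A standalone _simulate of just those processors (the trailing call into the common pipeline is omitted, and local API access is assumed):

```
curl -sK /opt/so/conf/elasticsearch/curl.config -X POST \
  "localhost:9200/_ingest/pipeline/_simulate" \
  -H 'Content-Type: application/json' -d '{
  "pipeline": {
    "processors": [
      {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
      {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
      {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg"}}
    ]
  },
  "docs": [ { "_source": { "msg": "authentication succeeded" } } ]
}'
# Expected: event.dataset becomes "kratos.access" and event.action "authentication succeeded".
```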


@@ -1,79 +1,15 @@
{ {
"description": "suricata.alert", "description" : "suricata.alert",
"processors": [ "processors" : [
{ { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
"set": { { "set": { "field": "tags","value": "alert" }},
"if": "ctx.event?.imported != true", { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
"field": "_index", { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
"value": "logs-suricata.alerts-so" { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
} { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
}, { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
{ { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
"set": { { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
"field": "tags", { "pipeline": { "name": "common.nids" } }
"value": "alert" ]
}
},
{
"rename": {
"field": "message2.alert",
"target_field": "rule",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature",
"target_field": "rule.name",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.ref",
"target_field": "rule.version",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature_id",
"target_field": "rule.uuid",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature_id",
"target_field": "rule.signature",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.payload_printable",
"target_field": "network.data.decoded",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"dissect": {
"field": "rule.rule",
"pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"pipeline": {
"name": "common.nids"
}
}
]
} }


@@ -1,155 +1,30 @@
{ {
"description": "suricata.common", "description" : "suricata.common",
"processors": [ "processors" : [
{ { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
"json": { { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
"field": "message", { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
"target_field": "message2", { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
"ignore_failure": true { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
} { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
}, { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
{ { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
"rename": { { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
"field": "message2.pkt_src", { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
"target_field": "network.packet_source", { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
"ignore_failure": true { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
} { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
}, { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
"rename": { { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
"field": "message2.proto", { "remove":{ "field": "agent", "ignore_failure": true } },
"target_field": "network.transport", {"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
"ignore_failure": true {
} "script": {
}, "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
{ "ignore_failure": false
"rename": { }
"field": "message2.in_iface", },
"target_field": "observer.ingress.interface.name", { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
"ignore_failure": true ]
} }
},
{
"rename": {
"field": "message2.flow_id",
"target_field": "log.id.uid",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.src_ip",
"target_field": "source.ip",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.src_port",
"target_field": "source.port",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dest_ip",
"target_field": "destination.ip",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dest_port",
"target_field": "destination.port",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.vlan",
"target_field": "network.vlan.id",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.community_id",
"target_field": "network.community_id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.xff",
"target_field": "xff.ip",
"ignore_missing": true
}
},
{
"set": {
"field": "event.dataset",
"value": "{{ message2.event_type }}"
}
},
{
"set": {
"field": "observer.name",
"value": "{{agent.name}}"
}
},
{
"set": {
"field": "event.ingested",
"value": "{{@timestamp}}"
}
},
{
"date": {
"field": "message2.timestamp",
"target_field": "@timestamp",
"formats": [
"ISO8601",
"UNIX"
],
"timezone": "UTC",
"ignore_failure": true
}
},
{
"remove": {
"field": "agent",
"ignore_failure": true
}
},
{
"append": {
"field": "related.ip",
"value": [
"{{source.ip}}",
"{{destination.ip}}"
],
"allow_duplicates": false,
"ignore_failure": true
}
},
{
"script": {
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
"ignore_failure": false
}
},
{
"rename": {
"field": "message2.capture_file",
"target_field": "suricata.capture_file",
"ignore_missing": true
}
},
{
"pipeline": {
"if": "ctx?.event?.dataset != null",
"name": "suricata.{{event.dataset}}"
}
}
]
}
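
The inline Painless script classifies source and destination addresses as RFC 1918 private or public. The core check can be exercised in isolation with the Painless execute API; this is an illustrative rewrite of the same logic, not the shipped script:

```
curl -sK /opt/so/conf/elasticsearch/curl.config -X POST \
  "localhost:9200/_scripts/painless/_execute" \
  -H 'Content-Type: application/json' -d '{
  "script": {
    "source": "boolean isPrivate(def ip) { int d1 = ip.indexOf(\".\"); int d2 = ip.indexOf(\".\", d1 + 1); int first = Integer.parseInt(ip.substring(0, d1)); if (first == 10) return true; if (first == 192 && ip.startsWith(\"168.\", d1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(d1 + 1, d2)); return second >= 16 && second <= 31; } return false; } return isPrivate(params.ip);",
    "params": { "ip": "172.20.1.5" }
  }
}'
# Expected result: true (172.16.0.0/12 is private address space).
```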


@@ -1,136 +1,21 @@
{ {
"description": "suricata.dns", "description" : "suricata.dns",
"processors": [ "processors" : [
{ { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
"rename": { { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
"field": "message2.proto", { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
"target_field": "network.transport", { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
"ignore_missing": true { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
} { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
}, { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
{ { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
"rename": { { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
"field": "message2.app_proto", { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
"target_field": "network.protocol", { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
"ignore_missing": true { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
} { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
}, { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
{ { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
"rename": { { "pipeline": { "name": "common" } }
"field": "message2.dns.type", ]
"target_field": "dns.query.type", }
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.tx_id",
"target_field": "dns.tx_id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.id",
"target_field": "dns.id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.version",
"target_field": "dns.version",
"ignore_missing": true
}
},
{
"pipeline": {
"name": "suricata.dnsv3",
"ignore_missing_pipeline": true,
"if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dns.rrname",
"target_field": "dns.query.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rrtype",
"target_field": "dns.query.type_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.flags",
"target_field": "dns.flags",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.qr",
"target_field": "dns.qr",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rd",
"target_field": "dns.recursion.desired",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.ra",
"target_field": "dns.recursion.available",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.opcode",
"target_field": "dns.opcode",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rcode",
"target_field": "dns.response.code_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.grouped.A",
"target_field": "dns.answers.data",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.grouped.CNAME",
"target_field": "dns.answers.name",
"ignore_missing": true
}
},
{
"pipeline": {
"if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
"name": "dns.tld"
}
},
{
"pipeline": {
"name": "common"
}
}
]
}


@@ -1,56 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.dns.queries",
"target_field": "dns.queries",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
}
},
{
"script": {
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
}
},
{
"foreach": {
"field": "dns.queries",
"processor": {
"rename": {
"field": "_ingest._value.rrname",
"target_field": "_ingest._value.name",
"ignore_missing": true
}
},
"ignore_failure": true
}
},
{
"foreach": {
"field": "dns.queries",
"processor": {
"rename": {
"field": "_ingest._value.rrtype",
"target_field": "_ingest._value.type_name",
"ignore_missing": true
}
},
"ignore_failure": true
}
},
{
"pipeline": {
"name": "suricata.tld",
"ignore_missing_pipeline": true,
"if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
"ignore_failure": true
}
}
]
}


@@ -1,52 +0,0 @@
{
"processors": [
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}",
"ignore_failure": true
}
}
]
}
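
The removed scripts derive top_level_domain, parent_domain, subdomain, and highest_registered_domain by plain substring slicing around the dots. The same decomposition for one sample name, as a quick shell illustration:

```
# Worked example of the TLD decomposition for q.name = "www.example.com".
name="www.example.com"
tld="${name##*.}"            # top_level_domain  -> "com"
without_tld="${name%.*}"     # query_without_tld -> "www.example"
parent="${without_tld##*.}"  # parent_domain     -> "example"
sub="${without_tld%.*}"      # subdomain         -> "www"
echo "highest_registered_domain: ${parent}.${tld}"   # -> "example.com"
echo "subdomain_length: ${#sub} parent_domain_length: ${#parent}"
```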


@@ -1,61 +0,0 @@
{
"description": "zeek.analyzer",
"processors": [
{
"set": {
"field": "event.dataset",
"value": "analyzer"
}
},
{
"remove": {
"field": [
"host"
],
"ignore_failure": true
}
},
{
"json": {
"field": "message",
"target_field": "message2",
"ignore_failure": true
}
},
{
"set": {
"field": "network.protocol",
"copy_from": "message2.analyzer_name",
"ignore_empty_value": true,
"if": "ctx?.message2?.analyzer_kind == 'protocol'"
}
},
{
"set": {
"field": "network.protocol",
"ignore_empty_value": true,
"if": "ctx?.message2?.analyzer_kind != 'protocol'",
"copy_from": "message2.proto"
}
},
{
"lowercase": {
"field": "network.protocol",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.failure_reason",
"target_field": "error.reason",
"ignore_missing": true
}
},
{
"pipeline": {
"name": "zeek.common"
}
}
]
}


@@ -1,227 +1,35 @@
{ {
"description": "zeek.dns", "description" : "zeek.dns",
"processors": [ "processors" : [
{ { "set": { "field": "event.dataset", "value": "dns" } },
"set": { { "remove": { "field": ["host"], "ignore_failure": true } },
"field": "event.dataset", { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
"value": "dns" { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
} { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
}, { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
{ { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
"remove": { { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
"field": [ { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
"host" { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
], { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
"ignore_failure": true { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
} { "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
}, { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
{ { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
"json": { { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
"field": "message", { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
"target_field": "message2", { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
"ignore_failure": true { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
} { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
}, { "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
{ { "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
"dot_expander": { { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
"field": "id.orig_h", { "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
"path": "message2", { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
"ignore_failure": true { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
} { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
}, { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
{ { "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
"rename": { { "pipeline": { "name": "zeek.common" } }
"field": "message2.proto", ]
"target_field": "network.transport",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.trans_id",
"target_field": "dns.id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rtt",
"target_field": "event.duration",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.query",
"target_field": "dns.query.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qclass",
"target_field": "dns.query.class",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qclass_name",
"target_field": "dns.query.class_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qtype",
"target_field": "dns.query.type",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qtype_name",
"target_field": "dns.query.type_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rcode",
"target_field": "dns.response.code",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rcode_name",
"target_field": "dns.response.code_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.AA",
"target_field": "dns.authoritative",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.TC",
"target_field": "dns.truncated",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.RD",
"target_field": "dns.recursion.desired",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.RA",
"target_field": "dns.recursion.available",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.Z",
"target_field": "dns.reserved",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.answers",
"target_field": "dns.answers.name",
"ignore_missing": true
}
},
{
"foreach": {
"field": "dns.answers.name",
"processor": {
"pipeline": {
"name": "common.ip_validation"
}
},
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
"ignore_failure": true
}
},
{
"foreach": {
"field": "temp._valid_ips",
"processor": {
"append": {
"field": "dns.resolved_ip",
"allow_duplicates": false,
"value": "{{{_ingest._value}}}",
"ignore_failure": true
}
},
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
"ignore_failure": true
}
},
{
"remove": {
"field": [
"temp"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.TTLs",
"target_field": "dns.ttls",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rejected",
"target_field": "dns.query.rejected",
"ignore_missing": true
}
},
{
"script": {
"lang": "painless",
"source": "ctx.dns.query.length = ctx.dns.query.name.length()",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx._index == 'so-zeek'",
"field": "_index",
"value": "so-zeek_dns",
"override": true
}
},
{
"pipeline": {
"if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
"name": "dns.tld"
}
},
{
"pipeline": {
"name": "zeek.common"
}
}
]
} }
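
Zeek writes connection endpoints as literal dotted keys (id.orig_h and friends), so each rename in these pipelines is preceded by a dot_expander. A standalone sketch of that pairing, assuming local API access:

```
curl -sK /opt/so/conf/elasticsearch/curl.config -X POST \
  "localhost:9200/_ingest/pipeline/_simulate" \
  -H 'Content-Type: application/json' -d '{
  "pipeline": {
    "processors": [
      { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
      { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } }
    ]
  },
  "docs": [ { "_source": { "message2": { "id.orig_h": "10.0.0.5" } } } ]
}'
# Expected: the literal "id.orig_h" key is expanded so the rename can move it to source.ip.
```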


@@ -0,0 +1,20 @@
{
"description" : "zeek.dpd",
"processors" : [
{ "set": { "field": "event.dataset", "value": "dpd" } },
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
{ "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
{ "pipeline": { "name": "zeek.common" } }
]
}


@@ -20,28 +20,8 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.log.gz appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D appender.rolling.strategy.action.condition.nested_condition.age = 7D
appender.rolling_json.type = RollingFile
appender.rolling_json.name = rolling_json
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
appender.rolling_json.layout.type = ECSJsonLayout
appender.rolling_json.layout.dataset = elasticsearch.server
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
appender.rolling_json.policies.type = Policies
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_json.policies.time.interval = 1
appender.rolling_json.policies.time.modulate = true
appender.rolling_json.strategy.type = DefaultRolloverStrategy
appender.rolling_json.strategy.action.type = Delete
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
appender.rolling_json.strategy.action.condition.type = IfFileName
appender.rolling_json.strategy.action.condition.glob = *.json.gz
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
rootLogger.level = info rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json
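
The surviving Delete action prunes compressed archives by name glob and age. A rough shell equivalent of the same selection, for eyeballing what would be removed:

```
# Approximate equivalent of the log4j2 Delete action above (*.gz older than 7 days).
find /var/log/elasticsearch -maxdepth 1 -name '*.gz' -mtime +7 -print
# Append -delete once the selection looks right.
```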


@@ -131,47 +131,6 @@ elasticsearch:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
forcedType: string
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb, e.g. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
cold: cold:
min_age: min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -185,12 +144,6 @@ elasticsearch:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
warm: warm:
min_age: min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -205,52 +158,6 @@ elasticsearch:
forcedType: int forcedType: int
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb, e.g. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
delete: delete:
min_age: min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -380,47 +287,6 @@ elasticsearch:
global: True global: True
advanced: True advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
forcedType: string
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb, e.g. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
warm: warm:
min_age: min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -448,52 +314,6 @@ elasticsearch:
global: True global: True
advanced: True advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
cold: cold:
min_age: min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -510,12 +330,6 @@ elasticsearch:
global: True global: True
advanced: True advanced: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
delete: delete:
min_age: min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It's important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -578,7 +392,6 @@ elasticsearch:
so-logs-elastic_agent_x_metricbeat: *indexSettings so-logs-elastic_agent_x_metricbeat: *indexSettings
so-logs-elastic_agent_x_osquerybeat: *indexSettings so-logs-elastic_agent_x_osquerybeat: *indexSettings
so-logs-elastic_agent_x_packetbeat: *indexSettings so-logs-elastic_agent_x_packetbeat: *indexSettings
so-logs-elasticsearch_x_server: *indexSettings
so-metrics-endpoint_x_metadata: *indexSettings so-metrics-endpoint_x_metadata: *indexSettings
so-metrics-endpoint_x_metrics: *indexSettings so-metrics-endpoint_x_metrics: *indexSettings
so-metrics-endpoint_x_policy: *indexSettings so-metrics-endpoint_x_policy: *indexSettings
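
For reference, the shrink, forcemerge, and allocate options these annotations exposed correspond to standard ILM actions. A hedged sketch of a warm phase using all three; the policy name and values are placeholders, not Security Onion defaults:

```
curl -sK /opt/so/conf/elasticsearch/curl.config -X PUT \
  "localhost:9200/_ilm/policy/so-shrink-example" \
  -H 'Content-Type: application/json' -d '{
  "policy": {
    "phases": {
      "warm": {
        "min_age": "30d",
        "actions": {
          "shrink":       { "number_of_shards": 1 },
          "forcemerge":   { "max_num_segments": 1, "index_codec": "best_compression" },
          "allocate":     { "number_of_replicas": 0 },
          "set_priority": { "priority": 50 }
        }
      }
    }
  }
}'
```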


@@ -1,66 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
# Create a cert for elasticsearch
elasticsearch_key:
x509.private_key_managed:
- name: /etc/pki/elasticsearch.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticsearch_crt:
x509.certificate_managed:
- name: /etc/pki/elasticsearch.crt
- ca_server: {{ CA.server }}
- signing_policy: registry
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
- x509: /etc/pki/elasticsearch.key
elastickeyperms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
elasticp12perms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.p12
- mode: 640
- group: 930
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
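
The removed state generated the key and certificate, then bundled them into a passwordless PKCS#12. Artifacts produced that way can be sanity-checked with openssl, for example:

```
# Verify the passwordless PKCS#12 bundle built by the openssl pkcs12 step above.
openssl pkcs12 -in /etc/pki/elasticsearch.p12 -nokeys -passin pass: \
  | openssl x509 -noout -subject -enddate
```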


@@ -61,55 +61,5 @@
{% do settings.index_template.template.settings.index.pop('sort') %} {% do settings.index_template.template.settings.index.pop('sort') %}
{% endif %} {% endif %}
{% endif %} {% endif %}
{# advanced ilm actions #}
{% if settings.policy is defined and settings.policy.phases is defined %}
{% set PHASE_NAMES = ["hot", "warm", "cold"] %}
{% for P in PHASE_NAMES %}
{% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %}
{% set PHASE = settings.policy.phases[P].actions %}
{# remove allocate action if number_of_replicas isn't configured #}
{% if PHASE.allocate is defined %}
{% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %}
{% do PHASE.pop('allocate', none) %}
{% endif %}
{% endif %}
{# start shrink action #}
{% if PHASE.shrink is defined %}
{% if PHASE.shrink.method is defined %}
{% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %}
{# remove max_primary_shard_size value when doing shrink operation by count vs size #}
{% do PHASE.shrink.pop('max_primary_shard_size', none) %}
{% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %}
{# remove number_of_shards value when doing shrink operation by size vs count #}
{% do PHASE.shrink.pop('number_of_shards', none) %}
{% else %}
{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #}
{% do PHASE.pop('shrink', none) %}
{% endif %}
{% endif %}
{% endif %}
{# always remove shrink method since its only used for SOC config, not in the actual ilm policy #}
{% if PHASE.shrink is defined %}
{% do PHASE.shrink.pop('method', none) %}
{% endif %}
{# end shrink action #}
{# start force merge #}
{% if PHASE.forcemerge is defined %}
{% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %}
{% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %}
{% else %}
{% do PHASE.forcemerge.pop('index_codec', none) %}
{% endif %}
{% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %}
{# max_num_segments is empty, drop it #}
{% do PHASE.pop('forcemerge', none) %}
{% endif %}
{% endif %}
{# end force merge #}
{% endif %}
{% endfor %}
{% endif %}
{% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %} {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
{% endfor %} {% endfor %}


@@ -841,10 +841,6 @@
"type": "long" "type": "long"
} }
} }
},
"capture_file": {
"type": "keyword",
"ignore_above": 1024
} }
} }
} }


@@ -14,9 +14,8 @@ set -e
# Check to see if we have extracted the ca cert.
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
# Make sure symbolic links are followed when copying from container
docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker rm so-elasticsearchca
echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -121,7 +121,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "Loading Security Onion index templates..." echo "Loading Security Onion index templates..."
shopt -s extglob shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %} {% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)" pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %} {% else %}
pattern="*" pattern="*"
{% endif %} {% endif %}
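For readers less familiar with bash extglob, `!(p1|p2|...)` matches any name that matches none of the alternatives. A rough Python equivalent of the heavy-node exclusion (template names are hypothetical):

```python
# Rough equivalent of pattern="!(*1password*|*aws*|...)" using fnmatch.
import fnmatch

excluded = ["*1password*", "*aws*", "*azure*", "*cloudflare*", "*elastic_agent*",
            "*fim*", "*github*", "*google*", "*osquery*", "*system*", "*windows*"]
templates = ["so-aws-template.json", "so-suricata-template.json"]  # hypothetical

keep = [t for t in templates
        if not any(fnmatch.fnmatch(t, pat) for pat in excluded)]
print(keep)  # ['so-suricata-template.json']
```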

View File

@@ -13,7 +13,6 @@
{# Import defaults.yaml for model hardware capabilities #}
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
{% set HYPERVISORMERGED = salt['pillar.get']('hypervisor', default=DEFAULTS.hypervisor, merge=True) %}
{# Get hypervisor nodes from pillar #}
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
@@ -31,10 +30,9 @@
{% set model = '' %}
{% if grains %}
{% set minion_id = grains.keys() | first %}
{% set model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', '')) %}
{% set model = grains[minion_id].get('sosmodel', '') %}
{% endif %}
{% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
{% set model_config = HYPERVISORMERGED.model.get(model, {}) %}
{# Get VM list from VMs file #}
{% set vms = {} %}
@@ -58,26 +56,10 @@
{% set role = vm.get('role', '') %}
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}
{# Try to load VM configuration from config file first, then .error file if config doesn't exist #}
{# Load VM configuration from config file #}
{% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
{% set vm_error_file = vm_file ~ '.error' %}
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}
{% import_json vm_file as vm_state %}
{# Check if base config file exists #}
{% set config_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_file) %}
{% set error_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_error_file) %}
{% set vm_state = none %}
{% if config_exists %}
{% import_json vm_file as vm_state %}
{% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from base file') %}
{% elif error_exists %}
{% import_json vm_error_file as vm_state %}
{% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from .error file') %}
{% else %}
{% do salt.log.warning('salt/hypervisor/map.jinja: No config or error file found for VM ' ~ hostname ~ '_' ~ role) %}
{% endif %}
{% if vm_state %}
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
{% set vm_data = {'config': vm_state.config} %}
@@ -101,7 +83,7 @@
{% endif %}
{% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
{% else %}
{% do salt.log.debug('salt/hypervisor/map.jinja: Skipping VM ' ~ hostname ~ '_' ~ role ~ ' - no config available') %}
{% do salt.log.debug('salt/hypervisor/map.jinja: Config file empty: ' ~ vm_file) %}
{% endif %}
{% endfor %}

View File

@@ -30,9 +30,7 @@
#
# WARNING: This script will DESTROY all data on the target drives!
#
# USAGE:
# sudo ./so-nvme-raid1.sh # Normal operation
# sudo ./so-nvme-raid1.sh --force-cleanup # Force cleanup of existing RAID
# USAGE: sudo ./so-nvme-raid1.sh
#
#################################################################
@@ -43,19 +41,6 @@ set -e
RAID_ARRAY_NAME="md0" RAID_ARRAY_NAME="md0"
RAID_DEVICE="/dev/${RAID_ARRAY_NAME}" RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
MOUNT_POINT="/nsm" MOUNT_POINT="/nsm"
FORCE_CLEANUP=false
# Parse command line arguments
for arg in "$@"; do
case $arg in
--force-cleanup)
FORCE_CLEANUP=true
shift
;;
*)
;;
esac
done
# Function to log messages
log() {
@@ -70,91 +55,6 @@ check_root() {
fi
}
# Function to force cleanup all RAID components
force_cleanup_raid() {
log "=== FORCE CLEANUP MODE ==="
log "This will destroy all RAID configurations and data on target drives!"
# Stop all MD arrays
log "Stopping all MD arrays"
mdadm --stop --scan 2>/dev/null || true
# Wait for arrays to stop
sleep 2
# Remove any running md devices
for md in /dev/md*; do
if [ -b "$md" ]; then
log "Stopping $md"
mdadm --stop "$md" 2>/dev/null || true
fi
done
# Force cleanup both NVMe drives
for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
log "Force cleaning $device"
# Kill any processes using the device
fuser -k "${device}"* 2>/dev/null || true
# Unmount any mounted partitions
for part in "${device}"*; do
if [ -b "$part" ]; then
umount -f "$part" 2>/dev/null || true
fi
done
# Force zero RAID superblocks on partitions
for part in "${device}"p*; do
if [ -b "$part" ]; then
log "Zeroing RAID superblock on $part"
mdadm --zero-superblock --force "$part" 2>/dev/null || true
fi
done
# Zero superblock on the device itself
log "Zeroing RAID superblock on $device"
mdadm --zero-superblock --force "$device" 2>/dev/null || true
# Remove LVM physical volumes
pvremove -ff -y "$device" 2>/dev/null || true
# Wipe all filesystem and partition signatures
log "Wiping all signatures from $device"
wipefs -af "$device" 2>/dev/null || true
# Overwrite the beginning of the drive (partition table area)
log "Clearing partition table on $device"
dd if=/dev/zero of="$device" bs=1M count=10 2>/dev/null || true
# Clear the end of the drive (backup partition table area)
local device_size=$(blockdev --getsz "$device" 2>/dev/null || echo "0")
if [ "$device_size" -gt 0 ]; then
dd if=/dev/zero of="$device" bs=512 seek=$(( device_size - 2048 )) count=2048 2>/dev/null || true
fi
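# blockdev --getsz reports size in 512-byte sectors, so seeking to
# (device_size - 2048) with bs=512 and count=2048 zeroes the final 1 MiB,
# where GPT keeps its backup partition table.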
# Force kernel to re-read partition table
blockdev --rereadpt "$device" 2>/dev/null || true
partprobe -s "$device" 2>/dev/null || true
done
# Clear mdadm configuration
log "Clearing mdadm configuration"
echo "DEVICE partitions" > /etc/mdadm.conf
# Remove any fstab entries for the RAID device or mount point
log "Cleaning fstab entries"
sed -i "\|${RAID_DEVICE}|d" /etc/fstab
sed -i "\|${MOUNT_POINT}|d" /etc/fstab
# Wait for system to settle
udevadm settle
sleep 5
log "Force cleanup complete!"
log "Proceeding with RAID setup..."
}
# Function to find MD arrays using specific devices
find_md_arrays_using_devices() {
local target_devices=("$@")
@@ -305,15 +205,10 @@ check_existing_raid() {
fi
log "Error: $device appears to be part of an existing RAID array"
log "Old RAID metadata detected but array is not running."
log ""
log "To fix this, run the script with --force-cleanup:"
log " sudo $0 --force-cleanup"
log ""
log "Or manually clean up with:"
log "1. Stop any arrays: mdadm --stop --scan"
log "2. Zero superblocks: mdadm --zero-superblock --force ${device}p1"
log "3. Wipe signatures: wipefs -af $device"
log "To reuse this device, you must first:"
log "1. Unmount any filesystems"
log "2. Stop the RAID array: mdadm --stop $array_name"
log "3. Zero the superblock: mdadm --zero-superblock ${device}p1"
exit 1
fi
done
@@ -343,7 +238,7 @@ ensure_devices_free() {
done
done
# Clear MD superblock
mdadm --zero-superblock --force "${device}"* 2>/dev/null || true
mdadm --zero-superblock "${device}"* 2>/dev/null || true
# Remove LVM PV if exists
pvremove -ff -y "$device" 2>/dev/null || true
@@ -368,11 +263,6 @@ main() {
# Check if running as root
check_root
# If force cleanup flag is set, do aggressive cleanup first
if [ "$FORCE_CLEANUP" = true ]; then
force_cleanup_raid
fi
# Check for existing RAID setup
check_existing_raid

View File

@@ -1,591 +0,0 @@
#!/usr/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
{% if 'vrt' in salt['pillar.get']('features', []) %}
"""
Script for creating and attaching virtual volumes to KVM virtual machines for NSM storage.
This script provides functionality to create pre-allocated raw disk images and attach them
to VMs as virtio-blk devices for high-performance network security monitoring data storage.
The script handles the complete volume lifecycle:
1. Volume Creation: Creates pre-allocated raw disk images using qemu-img
2. Volume Attachment: Attaches volumes to VMs as virtio-blk devices
3. VM Management: Stops/starts VMs as needed during the process
This script is designed to work with Security Onion's virtualization infrastructure and is typically
used during VM provisioning to add dedicated NSM storage volumes.
**Usage:**
so-kvm-create-volume -v <vm_name> -s <size_gb> [-S]
**Options:**
-v, --vm Name of the virtual machine to attach the volume to (required).
-s, --size Size of the volume in GB (required, must be a positive integer).
-S, --start Start the VM after volume creation and attachment (optional).
**Examples:**
1. **Create and Attach 500GB Volume:**
```bash
so-kvm-create-volume -v vm1_sensor -s 500
```
This command creates and attaches a volume with the following settings:
- VM Name: `vm1_sensor`
- Volume Size: `500` GB
- Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm-<epoch_timestamp>.img`
- Device: `/dev/vdb` (virtio-blk)
- VM remains stopped after attachment
2. **Create Volume and Start VM:**
```bash
so-kvm-create-volume -v vm2_sensor -s 1000 -S
```
This command creates a volume and starts the VM:
- VM Name: `vm2_sensor`
- Volume Size: `1000` GB (1 TB)
- VM is started after volume attachment due to the `-S` flag
3. **Create Large Volume for Heavy Traffic:**
```bash
so-kvm-create-volume -v vm3_sensor -s 2000 -S
```
This command creates a large volume for high-traffic environments:
- VM Name: `vm3_sensor`
- Volume Size: `2000` GB (2 TB)
- VM is started after attachment
**Notes:**
- The script automatically stops the VM if it's running before creating and attaching the volume.
- Volumes are created with full pre-allocation for optimal performance.
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm-<epoch_timestamp>.img`.
- The epoch timestamp ensures unique volume names and prevents conflicts.
- Volumes are attached as `/dev/vdb` using virtio-blk for high performance.
- The script checks available disk space before creating the volume.
- Ownership is set to `qemu:qemu` with permissions `640`.
- Without the `-S` flag, the VM remains stopped after volume attachment.
**Description:**
The `so-kvm-create-volume` script creates and attaches NSM storage volumes using the following process:
1. **Pre-flight Checks:**
- Validates input parameters (VM name, size)
- Checks available disk space in `/nsm/libvirt/volumes/`
- Ensures sufficient space for the requested volume size
2. **VM State Management:**
- Connects to the local libvirt daemon
- Stops the VM if it's currently running
- Retrieves current VM configuration
3. **Volume Creation:**
- Creates volume directory if it doesn't exist
- Uses `qemu-img create` with full pre-allocation
- Sets proper ownership (qemu:qemu) and permissions (640)
- Validates volume creation success
4. **Volume Attachment:**
- Modifies VM's libvirt XML configuration
- Adds disk element with virtio-blk driver
- Configures cache='none' and io='native' for performance
- Attaches volume as `/dev/vdb`
5. **VM Redefinition:**
- Applies the new configuration by redefining the VM
- Optionally starts the VM if requested
- Emits deployment status events for monitoring
6. **Error Handling:**
- Validates all input parameters
- Checks disk space before creation
- Handles volume creation failures
- Handles volume attachment failures
- Provides detailed error messages for troubleshooting
**Exit Codes:**
- `0`: Success
- `1`: An error occurred during execution
**Logging:**
- Logs are written to `/opt/so/log/hypervisor/so-kvm-create-volume.log`
- Both file and console logging are enabled for real-time monitoring
- Log entries include timestamps and severity levels
- Log prefixes: VOLUME:, VM:, HARDWARE:, SPACE:
- Detailed error messages are logged for troubleshooting
"""
import argparse
import sys
import os
import libvirt
import logging
import socket
import subprocess
import pwd
import grp
import time
import xml.etree.ElementTree as ET
from io import StringIO
from so_vm_utils import start_vm, stop_vm
from so_logging_utils import setup_logging
# Get hypervisor name from local hostname
HYPERVISOR = socket.gethostname()
# Volume storage directory
VOLUME_DIR = '/nsm/libvirt/volumes'
# Custom exception classes
class InsufficientSpaceError(Exception):
"""Raised when there is insufficient disk space for volume creation."""
pass
class VolumeCreationError(Exception):
"""Raised when volume creation fails."""
pass
class VolumeAttachmentError(Exception):
"""Raised when volume attachment fails."""
pass
# Custom log handler to capture output
class StringIOHandler(logging.Handler):
def __init__(self):
super().__init__()
self.strio = StringIO()
def emit(self, record):
msg = self.format(record)
self.strio.write(msg + '\n')
def get_value(self):
return self.strio.getvalue()
def parse_arguments():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description='Create and attach a virtual volume to a KVM virtual machine for NSM storage.')
parser.add_argument('-v', '--vm', required=True, help='Name of the virtual machine to attach the volume to.')
parser.add_argument('-s', '--size', type=int, required=True, help='Size of the volume in GB (must be a positive integer).')
parser.add_argument('-S', '--start', action='store_true', help='Start the VM after volume creation and attachment.')
args = parser.parse_args()
# Validate size is positive
if args.size <= 0:
parser.error("Volume size must be a positive integer.")
return args
def check_disk_space(size_gb, logger):
"""
Check if there is sufficient disk space available for volume creation.
Args:
size_gb: Size of the volume in GB
logger: Logger instance
Raises:
InsufficientSpaceError: If there is not enough disk space
"""
try:
stat = os.statvfs(VOLUME_DIR)
# Available space in bytes
available_bytes = stat.f_bavail * stat.f_frsize
# Required space in bytes (add 10% buffer)
required_bytes = size_gb * 1024 * 1024 * 1024 * 1.1
available_gb = available_bytes / (1024 * 1024 * 1024)
required_gb = required_bytes / (1024 * 1024 * 1024)
logger.info(f"SPACE: Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB")
if available_bytes < required_bytes:
raise InsufficientSpaceError(
f"Insufficient disk space. Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB"
)
logger.info(f"SPACE: Sufficient disk space available for {size_gb} GB volume")
except OSError as e:
logger.error(f"SPACE: Failed to check disk space: {e}")
raise
def create_volume_file(vm_name, size_gb, logger):
"""
Create a pre-allocated raw disk image for the VM.
Args:
vm_name: Name of the VM
size_gb: Size of the volume in GB
logger: Logger instance
Returns:
Path to the created volume file
Raises:
VolumeCreationError: If volume creation fails
"""
# Generate epoch timestamp for unique volume naming
epoch_timestamp = int(time.time())
# Define volume path with epoch timestamp for uniqueness
volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm-{epoch_timestamp}.img")
# Check if volume already exists (shouldn't be possible with timestamp)
if os.path.exists(volume_path):
logger.error(f"VOLUME: Volume already exists: {volume_path}")
raise VolumeCreationError(f"Volume already exists: {volume_path}")
logger.info(f"VOLUME: Creating {size_gb} GB volume at {volume_path}")
# Create volume using qemu-img with full pre-allocation
try:
cmd = [
'qemu-img', 'create',
'-f', 'raw',
'-o', 'preallocation=full',
volume_path,
f"{size_gb}G"
]
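# preallocation=full reserves and writes every block up front: creation is
# slower, but steady-state writes to the NSM volume avoid allocate-on-write
# stalls.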
result = subprocess.run(
cmd,
capture_output=True,
text=True,
check=True
)
logger.info(f"VOLUME: Volume created successfully")
if result.stdout:
logger.debug(f"VOLUME: qemu-img output: {result.stdout.strip()}")
except subprocess.CalledProcessError as e:
logger.error(f"VOLUME: Failed to create volume: {e}")
if e.stderr:
logger.error(f"VOLUME: qemu-img error: {e.stderr.strip()}")
raise VolumeCreationError(f"Failed to create volume: {e}")
# Set ownership to qemu:qemu
try:
qemu_uid = pwd.getpwnam('qemu').pw_uid
qemu_gid = grp.getgrnam('qemu').gr_gid
os.chown(volume_path, qemu_uid, qemu_gid)
logger.info(f"VOLUME: Set ownership to qemu:qemu")
except (KeyError, OSError) as e:
logger.error(f"VOLUME: Failed to set ownership: {e}")
raise VolumeCreationError(f"Failed to set ownership: {e}")
# Set permissions to 640
try:
os.chmod(volume_path, 0o640)
logger.info(f"VOLUME: Set permissions to 640")
except OSError as e:
logger.error(f"VOLUME: Failed to set permissions: {e}")
raise VolumeCreationError(f"Failed to set permissions: {e}")
# Verify volume was created
if not os.path.exists(volume_path):
logger.error(f"VOLUME: Volume file not found after creation: {volume_path}")
raise VolumeCreationError(f"Volume file not found after creation: {volume_path}")
volume_size = os.path.getsize(volume_path)
logger.info(f"VOLUME: Volume created: {volume_path} ({volume_size} bytes)")
return volume_path
def attach_volume_to_vm(conn, vm_name, volume_path, logger):
"""
Attach the volume to the VM's libvirt XML configuration.
Args:
conn: Libvirt connection
vm_name: Name of the VM
volume_path: Path to the volume file
logger: Logger instance
Raises:
VolumeAttachmentError: If volume attachment fails
"""
try:
# Get the VM domain
dom = conn.lookupByName(vm_name)
# Get the XML description of the VM
xml_desc = dom.XMLDesc()
root = ET.fromstring(xml_desc)
# Find the devices element
devices_elem = root.find('./devices')
if devices_elem is None:
logger.error("VM: Could not find <devices> element in XML")
raise VolumeAttachmentError("Could not find <devices> element in VM XML")
# Log ALL devices with PCI addresses to find conflicts
logger.info("DISK_DEBUG: Examining ALL devices with PCI addresses")
for device in devices_elem:
address = device.find('./address')
if address is not None and address.get('type') == 'pci':
bus = address.get('bus', 'unknown')
slot = address.get('slot', 'unknown')
function = address.get('function', 'unknown')
logger.info(f"DISK_DEBUG: Device {device.tag}: bus={bus}, slot={slot}, function={function}")
# Log existing disk configuration for debugging
logger.info("DISK_DEBUG: Examining existing disk configuration")
existing_disks = devices_elem.findall('./disk')
for idx, disk in enumerate(existing_disks):
target = disk.find('./target')
source = disk.find('./source')
address = disk.find('./address')
dev_name = target.get('dev') if target is not None else 'unknown'
source_file = source.get('file') if source is not None else 'unknown'
if address is not None:
slot = address.get('slot', 'unknown')
bus = address.get('bus', 'unknown')
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
else:
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")
# Check if vdb already exists
for disk in devices_elem.findall('./disk'):
target = disk.find('./target')
if target is not None and target.get('dev') == 'vdb':
logger.error("VM: Device vdb already exists in VM configuration")
raise VolumeAttachmentError("Device vdb already exists in VM configuration")
logger.info(f"VM: Attaching volume to {vm_name} as /dev/vdb")
# Create disk element
disk_elem = ET.SubElement(devices_elem, 'disk', attrib={
'type': 'file',
'device': 'disk'
})
# Add driver element
ET.SubElement(disk_elem, 'driver', attrib={
'name': 'qemu',
'type': 'raw',
'cache': 'none',
'io': 'native'
})
# Add source element
ET.SubElement(disk_elem, 'source', attrib={
'file': volume_path
})
# Add target element
ET.SubElement(disk_elem, 'target', attrib={
'dev': 'vdb',
'bus': 'virtio'
})
# Add address element
# Use bus 0x07 with slot 0x00 to ensure NSM volume appears after OS disk (which is on bus 0x04)
# Bus 0x05 is used by memballoon, bus 0x06 is used by rng device
# Libvirt requires slot 0x00 for non-zero buses
# This ensures vda = OS disk, vdb = NSM volume
ET.SubElement(disk_elem, 'address', attrib={
'type': 'pci',
'domain': '0x0000',
'bus': '0x07',
'slot': '0x00',
'function': '0x0'
})
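# For reference, the disk element assembled above serializes roughly as
# (illustrative output, not captured from a live VM):
#   <disk type="file" device="disk">
#     <driver name="qemu" type="raw" cache="none" io="native"/>
#     <source file="/nsm/libvirt/volumes/vm1_sensor-nsm-1726680000.img"/>
#     <target dev="vdb" bus="virtio"/>
#     <address type="pci" domain="0x0000" bus="0x07" slot="0x00" function="0x0"/>
#   </disk>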
logger.info(f"HARDWARE: Added disk configuration for vdb")
# Log disk ordering after adding new disk
logger.info("DISK_DEBUG: Disk configuration after adding NSM volume")
all_disks = devices_elem.findall('./disk')
for idx, disk in enumerate(all_disks):
target = disk.find('./target')
source = disk.find('./source')
address = disk.find('./address')
dev_name = target.get('dev') if target is not None else 'unknown'
source_file = source.get('file') if source is not None else 'unknown'
if address is not None:
slot = address.get('slot', 'unknown')
bus = address.get('bus', 'unknown')
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
else:
logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")
# Convert XML back to string
new_xml_desc = ET.tostring(root, encoding='unicode')
# Redefine the VM with the new XML
conn.defineXML(new_xml_desc)
logger.info(f"VM: VM redefined with volume attached")
except libvirt.libvirtError as e:
logger.error(f"VM: Failed to attach volume: {e}")
raise VolumeAttachmentError(f"Failed to attach volume: {e}")
except Exception as e:
logger.error(f"VM: Failed to attach volume: {e}")
raise VolumeAttachmentError(f"Failed to attach volume: {e}")
def emit_status_event(vm_name, status):
"""
Emit a deployment status event.
Args:
vm_name: Name of the VM
status: Status message
"""
try:
subprocess.run([
'so-salt-emit-vm-deployment-status-event',
'-v', vm_name,
'-H', HYPERVISOR,
'-s', status
], check=True)
except subprocess.CalledProcessError as e:
# Don't fail the entire operation if status event fails
pass
def main():
"""Main function to orchestrate volume creation and attachment."""
# Set up logging using the so_logging_utils library
string_handler = StringIOHandler()
string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger = setup_logging(
logger_name='so-kvm-create-volume',
log_file_path='/opt/so/log/hypervisor/so-kvm-create-volume.log',
log_level=logging.INFO,
format_str='%(asctime)s - %(levelname)s - %(message)s'
)
logger.addHandler(string_handler)
vm_name = None
try:
# Parse arguments
args = parse_arguments()
vm_name = args.vm
size_gb = args.size
start_vm_flag = args.start
logger.info(f"VOLUME: Starting volume creation for VM '{vm_name}' with size {size_gb} GB")
# Emit start status event
emit_status_event(vm_name, 'Volume Creation')
# Ensure volume directory exists before checking disk space
try:
os.makedirs(VOLUME_DIR, mode=0o754, exist_ok=True)
qemu_uid = pwd.getpwnam('qemu').pw_uid
qemu_gid = grp.getgrnam('qemu').gr_gid
os.chown(VOLUME_DIR, qemu_uid, qemu_gid)
logger.debug(f"VOLUME: Ensured volume directory exists: {VOLUME_DIR}")
except Exception as e:
logger.error(f"VOLUME: Failed to create volume directory: {e}")
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
# Check disk space
check_disk_space(size_gb, logger)
# Connect to libvirt
try:
conn = libvirt.open(None)
logger.info("VM: Connected to libvirt")
except libvirt.libvirtError as e:
logger.error(f"VM: Failed to open connection to libvirt: {e}")
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
# Stop VM if running
dom = stop_vm(conn, vm_name, logger)
# Create volume file
volume_path = create_volume_file(vm_name, size_gb, logger)
# Attach volume to VM
attach_volume_to_vm(conn, vm_name, volume_path, logger)
# Start VM if -S or --start argument is provided
if start_vm_flag:
dom = conn.lookupByName(vm_name)
start_vm(dom, logger)
logger.info(f"VM: VM '{vm_name}' started successfully")
else:
logger.info("VM: Start flag not provided; VM will remain stopped")
# Close connection
conn.close()
# Emit success status event
emit_status_event(vm_name, 'Volume Configuration')
logger.info(f"VOLUME: Volume creation and attachment completed successfully for VM '{vm_name}'")
except KeyboardInterrupt:
error_msg = "Operation cancelled by user"
logger.error(error_msg)
if vm_name:
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
except InsufficientSpaceError as e:
error_msg = f"SPACE: {str(e)}"
logger.error(error_msg)
if vm_name:
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
except VolumeCreationError as e:
error_msg = f"VOLUME: {str(e)}"
logger.error(error_msg)
if vm_name:
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
except VolumeAttachmentError as e:
error_msg = f"VM: {str(e)}"
logger.error(error_msg)
if vm_name:
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
except Exception as e:
error_msg = f"An error occurred: {str(e)}"
logger.error(error_msg)
if vm_name:
emit_status_event(vm_name, 'Volume Configuration Failed')
sys.exit(1)
if __name__ == '__main__':
main()
{%- else -%}
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."
{% endif -%}

salt/idstools/config.sls Normal file
View File

@@ -0,0 +1,65 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- idstools.sync_files
idstoolslogdir:
file.directory:
- name: /opt/so/log/idstools
- user: 939
- group: 939
- makedirs: True
idstools_sbin:
file.recurse:
- name: /usr/sbin
- source: salt://idstools/tools/sbin
- user: 939
- group: 939
- file_mode: 755
# If this is used, exclude so-rule-update
#idstools_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://idstools/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
idstools_so-rule-update:
file.managed:
- name: /usr/sbin/so-rule-update
- source: salt://idstools/tools/sbin_jinja/so-rule-update
- user: 939
- group: 939
- mode: 755
- template: jinja
suricatacustomdirsfile:
file.directory:
- name: /nsm/rules/detect-suricata/custom_file
- user: 939
- group: 939
- makedirs: True
suricatacustomdirsurl:
file.directory:
- name: /nsm/rules/detect-suricata/custom_temp
- user: 939
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -0,0 +1,10 @@
idstools:
enabled: False
config:
urls: []
ruleset: ETOPEN
oinkcode: ""
sids:
enabled: []
disabled: []
modify: []

Some files were not shown because too many files have changed in this diff.