Compare commits


2 Commits

Author SHA1 Message Date
DefensiveDepth 422b4bc4c9 Add local custom Playbooks 2025-09-18 12:22:20 -04:00
DefensiveDepth 6cdd88808a Add local custom Playbooks 2025-09-18 12:07:21 -04:00
429 changed files with 3407 additions and 10772 deletions
+1 -5
@@ -32,10 +32,6 @@ body:
- 2.4.170 - 2.4.170
- 2.4.180 - 2.4.180
- 2.4.190 - 2.4.190
- 2.4.200
- 2.4.201
- 2.4.210
- 2.4.211
- Other (please provide detail below) - Other (please provide detail below)
validations: validations:
required: true required: true
@@ -97,7 +93,7 @@ body:
attributes: attributes:
label: Hardware Specs label: Hardware Specs
description: > description: >
Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://securityonion.net/docs/hardware? Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html?
options: options:
- -
- Meets minimum requirements - Meets minimum requirements
+1 -1
@@ -4,7 +4,7 @@ on:
pull_request: pull_request:
paths: paths:
- "salt/sensoroni/files/analyzers/**" - "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin/**" - "salt/manager/tools/sbin"
jobs: jobs:
build: build:
+12 -12
@@ -1,17 +1,17 @@
### 2.4.211-20260407 ISO image released on 2026/04/07 ### 2.4.180-20250916 ISO image released on 2025/09/17
### Download and Verify ### Download and Verify
2.4.211-20260407 ISO image: 2.4.180-20250916 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.211-20260407.iso https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
MD5: 35ECDD0BC10E56874D9F5725CA6C5888 MD5: DE93880E38DE4BE45D05A41E1745CB1F
SHA1: 30CE6CB0ED0059A3260368E4F296B8DBA381F9CD SHA1: AEA6948911E50A4A38E8729E0E965C565402E3FC
SHA256: 185D8CF49CD3BFDD8876B8DDE48343DA90804B0C0EC3EADF0AD90D29C55E72B7 SHA256: C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A
Signature for ISO image: Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.211-20260407.iso.sig https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
Signing key: Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO: Download the signature file for the ISO:
``` ```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.211-20260407.iso.sig wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
``` ```
Download the ISO image: Download the ISO image:
``` ```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.211-20260407.iso wget https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
``` ```
Verify the downloaded ISO image using the signature file: Verify the downloaded ISO image using the signature file:
``` ```
gpg --verify securityonion-2.4.211-20260407.iso.sig securityonion-2.4.211-20260407.iso gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
``` ```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below: The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
``` ```
gpg: Signature made Mon 06 Apr 2026 02:58:51 PM EDT using RSA key ID FE507013 gpg: Signature made Tue 16 Sep 2025 06:30:19 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>" gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature! gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner. gpg: There is no indication that the signature belongs to the owner.
@@ -50,4 +50,4 @@ Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network. If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
Once you've verified the ISO image, you're ready to proceed to our Installation guide: Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://securityonion.net/docs/installation https://docs.securityonion.net/en/2.4/installation.html
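For reference, a minimal Python sketch of the checksum half of the verification above; the filename and SHA256 digest are the 2.4.180-20260916-side values quoted in this hunk (corrected: 2.4.180-20250916), and gpg signature verification remains the documented method:

```python
# A minimal sketch (not part of this change set): verifying the downloaded ISO's
# SHA256 in Python. Filename and expected digest are the 2.4.180-20250916 values
# quoted in this hunk; gpg signature verification is still the documented method.
import hashlib

ISO = "securityonion-2.4.180-20250916.iso"
EXPECTED = "C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A"

digest = hashlib.sha256()
with open(ISO, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        digest.update(chunk)

print("OK" if digest.hexdigest().upper() == EXPECTED else "checksum mismatch")
```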
-1
@@ -1 +0,0 @@
+6 -6
@@ -27,24 +27,24 @@ Config
### Release Notes ### Release Notes
https://securityonion.net/docs/release-notes https://docs.securityonion.net/en/2.4/release-notes.html
### Requirements ### Requirements
https://securityonion.net/docs/hardware https://docs.securityonion.net/en/2.4/hardware.html
### Download ### Download
https://securityonion.net/docs/download https://docs.securityonion.net/en/2.4/download.html
### Installation ### Installation
https://securityonion.net/docs/installation https://docs.securityonion.net/en/2.4/installation.html
### FAQ ### FAQ
https://securityonion.net/docs/faq https://docs.securityonion.net/en/2.4/faq.html
### Feedback ### Feedback
https://securityonion.net/docs/community-support https://docs.securityonion.net/en/2.4/community-support.html
+1 -1
@@ -1 +1 @@
2.4.211 2.4.190
-2
@@ -1,2 +0,0 @@
ca:
server:
+6 -1
@@ -1,6 +1,5 @@
base: base:
'*': '*':
- ca
- global.soc_global - global.soc_global
- global.adv_global - global.adv_global
- docker.soc_docker - docker.soc_docker
@@ -44,6 +43,8 @@ base:
- secrets - secrets
- manager.soc_manager - manager.soc_manager
- manager.adv_manager - manager.adv_manager
- idstools.soc_idstools
- idstools.adv_idstools
- logstash.nodes - logstash.nodes
- logstash.soc_logstash - logstash.soc_logstash
- logstash.adv_logstash - logstash.adv_logstash
@@ -116,6 +117,8 @@ base:
- elastalert.adv_elastalert - elastalert.adv_elastalert
- manager.soc_manager - manager.soc_manager
- manager.adv_manager - manager.adv_manager
- idstools.soc_idstools
- idstools.adv_idstools
- soc.soc_soc - soc.soc_soc
- soc.adv_soc - soc.adv_soc
- kibana.soc_kibana - kibana.soc_kibana
@@ -155,6 +158,8 @@ base:
{% endif %} {% endif %}
- secrets - secrets
- healthcheck.standalone - healthcheck.standalone
- idstools.soc_idstools
- idstools.adv_idstools
- kratos.soc_kratos - kratos.soc_kratos
- kratos.adv_kratos - kratos.adv_kratos
- hydra.soc_hydra - hydra.soc_hydra
-91
@@ -1,91 +0,0 @@
#!/opt/saltstack/salt/bin/python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
"""
Salt execution module for hypervisor operations.
This module provides functions for managing hypervisor configurations,
including VM file management.
"""
import json
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = 'hypervisor'
def __virtual__():
"""
Only load this module if we're on a system that can manage hypervisors.
"""
return __virtualname__
def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
"""
Remove a VM entry from the hypervisorVMs file.
Args:
vms_file_path (str): Path to the hypervisorVMs file
vm_hostname (str): Hostname of the VM to remove (without role suffix)
vm_role (str): Role of the VM
Returns:
dict: Result dictionary with success status and message
CLI Example:
salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
"""
try:
# Check if file exists
if not os.path.exists(vms_file_path):
msg = f"VMs file not found: {vms_file_path}"
log.error(msg)
return {'result': False, 'comment': msg}
# Read current VMs
with open(vms_file_path, 'r') as f:
content = f.read().strip()
vms = json.loads(content) if content else []
# Find and remove the VM entry
original_count = len(vms)
vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]
if len(vms) < original_count:
# VM was found and removed, write back to file
with open(vms_file_path, 'w') as f:
json.dump(vms, f, indent=2)
# Set socore:socore ownership (939:939)
os.chown(vms_file_path, 939, 939)
msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
log.info(msg)
return {'result': True, 'comment': msg}
else:
msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
log.warning(msg)
return {'result': False, 'comment': msg}
except json.JSONDecodeError as e:
msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
log.error(msg)
return {'result': False, 'comment': msg}
except Exception as e:
msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
log.error(msg)
return {'result': False, 'comment': msg}
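The function above boils down to filtering a JSON list by hostname and role. A standalone sketch of that step; the list-of-objects layout (entries carrying "hostname" and "role" keys) is an assumption inferred from the code, not a documented format:

```python
# Standalone sketch of the filtering step remove_vm_from_vms_file performs.
# The hypervisorVMs layout shown here is inferred from the code above.
import json

content = '[{"hostname": "node1", "role": "nsm"}, {"hostname": "node2", "role": "sensor"}]'
vms = json.loads(content)

vm_hostname, vm_role = "node1", "nsm"
remaining = [vm for vm in vms
             if not (vm.get("hostname") == vm_hostname and vm.get("role") == vm_role)]

# len(remaining) < len(vms) is the "found and removed" condition used above
print(json.dumps(remaining, indent=2))
```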
+3 -92
@@ -7,14 +7,12 @@
""" """
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network, It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.
so-kvm-modify-hardware, and so-kvm-create-volume scripts.
The module offers three main capabilities: The module offers two main capabilities:
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images 1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough) 2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
3. Volume Management: Create and attach virtual storage volumes for NSM data
This module is intended to work with Security Onion's virtualization infrastructure and is typically This module is intended to work with Security Onion's virtualization infrastructure and is typically
used in conjunction with salt-cloud for VM provisioning and management. used in conjunction with salt-cloud for VM provisioning and management.
@@ -246,90 +244,3 @@ def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False
except Exception as e: except Exception as e:
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e)) log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
raise raise
def create_volume_config(vm_name, size_gb, start=False):
'''
Usage:
salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]
Options:
vm_name
Name of the virtual machine to attach the volume to
size_gb
Volume size in GB (positive integer)
This determines the capacity of the virtual storage volume
start
Boolean flag to start the VM after volume creation
Optional - defaults to False
Examples:
1. **Create 500GB Volume:**
```bash
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
```
This creates a 500GB virtual volume for NSM storage
2. **Create 1TB Volume and Start VM:**
```bash
salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
```
This creates a 1TB volume and starts the VM after attachment
Notes:
- VM must be stopped before volume creation
- Volume is created as a qcow2 image and attached to the VM
- This is an alternative to disk passthrough via modify_hardware_config
- Volume is automatically attached to the VM's libvirt configuration
- Requires so-kvm-create-volume script to be installed
- Volume files are stored in the hypervisor's VM storage directory
Description:
This function creates and attaches a virtual storage volume to a KVM virtual machine
using the so-kvm-create-volume script. It creates a qcow2 disk image of the specified
size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
This provides an alternative to physical disk passthrough, allowing flexible storage
allocation without requiring dedicated hardware. The VM can optionally be started
after the volume is successfully created and attached.
Exit Codes:
0: Success
1: Invalid parameters
2: VM state error (running when should be stopped)
3: Volume creation error
4: System command error
255: Unexpected error
Logging:
- All operations are logged to the salt minion log
- Log entries are prefixed with 'qcow2 module:'
- Volume creation and attachment operations are logged
- Errors include detailed messages and stack traces
- Final status of volume creation is logged
'''
# Validate size_gb parameter
if not isinstance(size_gb, int) or size_gb <= 0:
raise ValueError('size_gb must be a positive integer.')
cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
if start:
cmd.append('-S')
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
try:
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
ret = {
'retcode': result.returncode,
'stdout': result.stdout,
'stderr': result.stderr
}
if result.returncode != 0:
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
else:
log.info('qcow2 module: Script executed successfully.')
return ret
except Exception as e:
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
raise
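The heart of `create_volume_config` is argument validation plus assembling the `so-kvm-create-volume` command line. A trimmed sketch of just that; the script path and `-v`/`-s`/`-S` flags come from the removed code above, not from any current interface, and actually running it requires a hypervisor with the script installed:

```python
# Sketch of the command assembly the removed create_volume_config helper did
# before shelling out via subprocess.run.
import shlex

def build_cmd(vm_name: str, size_gb: int, start: bool = False) -> list:
    if not isinstance(size_gb, int) or size_gb <= 0:
        raise ValueError("size_gb must be a positive integer.")
    cmd = ["/usr/sbin/so-kvm-create-volume", "-v", vm_name, "-s", str(size_gb)]
    if start:
        cmd.append("-S")
    return cmd

print(" ".join(shlex.quote(arg) for arg in build_cmd("sensor1_sensor", 500)))
# subprocess.run(build_cmd(...), capture_output=True, text=True, check=False) on the hypervisor
```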
+18 -97
@@ -172,15 +172,7 @@ MANAGER_HOSTNAME = socket.gethostname()
def _download_image(): def _download_image():
""" """
Download and validate the Oracle Linux KVM image with retry logic and progress monitoring. Download and validate the Oracle Linux KVM image.
Features:
- Detects stalled downloads (no progress for 30 seconds)
- Retries up to 3 times on failure
- Connection timeout of 30 seconds
- Read timeout of 60 seconds
- Cleans up partial downloads on failure
Returns: Returns:
bool: True if successful or file exists with valid checksum, False on error bool: True if successful or file exists with valid checksum, False on error
""" """
@@ -194,54 +186,25 @@ def _download_image():
log.info("Starting image download process") log.info("Starting image download process")
# Retry configuration
max_attempts = 3
retry_delay = 5 # seconds to wait between retry attempts
stall_timeout = 30 # seconds without progress before considering download stalled
connection_timeout = 30 # seconds to establish connection
read_timeout = 60 # seconds to wait for data chunks
for attempt in range(1, max_attempts + 1):
log.info("Download attempt %d of %d", attempt, max_attempts)
try: try:
# Download file with timeouts # Download file
log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH) log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
response = requests.get( response = requests.get(IMAGE_URL, stream=True)
IMAGE_URL,
stream=True,
timeout=(connection_timeout, read_timeout)
)
response.raise_for_status() response.raise_for_status()
# Get total file size for progress tracking # Get total file size for progress tracking
total_size = int(response.headers.get('content-length', 0)) total_size = int(response.headers.get('content-length', 0))
downloaded_size = 0 downloaded_size = 0
last_log_time = 0 last_log_time = 0
last_progress_time = time.time()
last_downloaded_size = 0
# Save file with progress logging and stall detection # Save file with progress logging
with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f: with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192): for chunk in response.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk) f.write(chunk)
downloaded_size += len(chunk) downloaded_size += len(chunk)
current_time = time.time()
# Check for stalled download
if downloaded_size > last_downloaded_size:
# Progress made, reset stall timer
last_progress_time = current_time
last_downloaded_size = downloaded_size
elif current_time - last_progress_time > stall_timeout:
# No progress for stall_timeout seconds
raise Exception(
f"Download stalled: no progress for {stall_timeout} seconds "
f"at {downloaded_size}/{total_size} bytes"
)
# Log progress every second # Log progress every second
current_time = time.time()
if current_time - last_log_time >= 1: if current_time - last_log_time >= 1:
progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0 progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
log.info("Progress - %.1f%% (%d/%d bytes)", log.info("Progress - %.1f%% (%d/%d bytes)",
@@ -249,50 +212,17 @@ def _download_image():
last_log_time = current_time last_log_time = current_time
# Validate downloaded file # Validate downloaded file
log.info("Download complete, validating checksum...")
if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256): if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
log.error("Checksum validation failed on attempt %d", attempt)
os.unlink(IMAGE_PATH) os.unlink(IMAGE_PATH)
if attempt < max_attempts:
log.info("Will retry download...")
continue
else:
log.error("All download attempts failed due to checksum mismatch")
return False return False
log.info("Successfully downloaded and validated Oracle Linux KVM image") log.info("Successfully downloaded and validated Oracle Linux KVM image")
return True return True
except requests.exceptions.Timeout as e:
log.error("Download attempt %d failed: Timeout - %s", attempt, str(e))
if os.path.exists(IMAGE_PATH):
os.unlink(IMAGE_PATH)
if attempt < max_attempts:
log.info("Will retry download in %d seconds...", retry_delay)
time.sleep(retry_delay)
else:
log.error("All download attempts failed due to timeout")
except requests.exceptions.RequestException as e:
log.error("Download attempt %d failed: Network error - %s", attempt, str(e))
if os.path.exists(IMAGE_PATH):
os.unlink(IMAGE_PATH)
if attempt < max_attempts:
log.info("Will retry download in %d seconds...", retry_delay)
time.sleep(retry_delay)
else:
log.error("All download attempts failed due to network errors")
except Exception as e: except Exception as e:
log.error("Download attempt %d failed: %s", attempt, str(e)) log.error("Error downloading hypervisor image: %s", str(e))
if os.path.exists(IMAGE_PATH): if os.path.exists(IMAGE_PATH):
os.unlink(IMAGE_PATH) os.unlink(IMAGE_PATH)
if attempt < max_attempts:
log.info("Will retry download in %d seconds...", retry_delay)
time.sleep(retry_delay)
else:
log.error("All download attempts failed")
return False return False
def _check_ssh_keys_exist(): def _check_ssh_keys_exist():
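The retry/stall-detection variant of `_download_image` shown in this hunk reduces to: stream with connection/read timeouts, treat a run of empty keep-alive chunks as a stall, delete the partial file, and retry a few times. A condensed sketch of that pattern; URL, path, and thresholds below are placeholders:

```python
# Condensed sketch of the download retry/stall-detection pattern from this hunk.
import os
import time
import requests

def download(url: str, path: str, max_attempts: int = 3, stall_timeout: int = 30) -> bool:
    for attempt in range(1, max_attempts + 1):
        try:
            resp = requests.get(url, stream=True, timeout=(30, 60))
            resp.raise_for_status()
            downloaded, last_progress = 0, time.time()
            with open(path, "wb") as f:
                for chunk in resp.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)
                        last_progress = time.time()
                    elif time.time() - last_progress > stall_timeout:
                        raise RuntimeError(f"download stalled at {downloaded} bytes")
            return True  # checksum validation would follow here
        except Exception as exc:
            print(f"attempt {attempt} failed: {exc}")
            if os.path.exists(path):
                os.unlink(path)
            if attempt < max_attempts:
                time.sleep(5)
    return False
```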
@@ -489,28 +419,25 @@ def _ensure_hypervisor_host_dir(minion_id: str = None):
log.error(f"Error creating hypervisor host directory: {str(e)}") log.error(f"Error creating hypervisor host directory: {str(e)}")
return False return False
def _apply_dyanno_hypervisor_state(status): def _apply_dyanno_hypervisor_state():
""" """
Apply the soc.dyanno.hypervisor state on the salt master. Apply the soc.dyanno.hypervisor state on the salt master.
This function applies the soc.dyanno.hypervisor state on the salt master This function applies the soc.dyanno.hypervisor state on the salt master
to update the hypervisor annotation and ensure all hypervisor host directories exist. to update the hypervisor annotation and ensure all hypervisor host directories exist.
Args:
status: Status passed to the hypervisor annotation state
Returns: Returns:
bool: True if state was applied successfully, False otherwise bool: True if state was applied successfully, False otherwise
""" """
try: try:
log.info(f"Applying soc.dyanno.hypervisor state on salt master with status: {status}") log.info("Applying soc.dyanno.hypervisor state on salt master")
# Initialize the LocalClient # Initialize the LocalClient
local = salt.client.LocalClient() local = salt.client.LocalClient()
# Target the salt master to apply the soc.dyanno.hypervisor state # Target the salt master to apply the soc.dyanno.hypervisor state
target = MANAGER_HOSTNAME + '_*' target = MANAGER_HOSTNAME + '_*'
state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', f"pillar={{'baseDomain': {{'status': '{status}'}}}}", 'concurrent=True'], tgt_type='glob') state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', "pillar={'baseDomain': {'status': 'PreInit'}}", 'concurrent=True'], tgt_type='glob')
log.debug(f"state_result: {state_result}") log.debug(f"state_result: {state_result}")
# Check if state was applied successfully # Check if state was applied successfully
if state_result: if state_result:
@@ -527,17 +454,17 @@ def _apply_dyanno_hypervisor_state(status):
success = False success = False
if success: if success:
log.info(f"Successfully applied soc.dyanno.hypervisor state with status: {status}") log.info("Successfully applied soc.dyanno.hypervisor state")
return True return True
else: else:
log.error(f"Failed to apply soc.dyanno.hypervisor state with status: {status}") log.error("Failed to apply soc.dyanno.hypervisor state")
return False return False
else: else:
log.error(f"No response from salt master when applying soc.dyanno.hypervisor state with status: {status}") log.error("No response from salt master when applying soc.dyanno.hypervisor state")
return False return False
except Exception as e: except Exception as e:
log.error(f"Error applying soc.dyanno.hypervisor state with status: {status}: {str(e)}") log.error(f"Error applying soc.dyanno.hypervisor state: {str(e)}")
return False return False
def _apply_cloud_config_state(): def _apply_cloud_config_state():
@@ -671,6 +598,11 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
log.warning("Failed to apply salt.cloud.config state, continuing with setup") log.warning("Failed to apply salt.cloud.config state, continuing with setup")
# We don't return an error here as we want to continue with the setup process # We don't return an error here as we want to continue with the setup process
# Apply the soc.dyanno.hypervisor state on the salt master
if not _apply_dyanno_hypervisor_state():
log.warning("Failed to apply soc.dyanno.hypervisor state, continuing with setup")
# We don't return an error here as we want to continue with the setup process
log.info("Starting setup_environment in setup_hypervisor runner") log.info("Starting setup_environment in setup_hypervisor runner")
# Check if environment is already set up # Check if environment is already set up
@@ -684,12 +616,9 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
# Handle image setup if needed # Handle image setup if needed
if not image_valid: if not image_valid:
_apply_dyanno_hypervisor_state('ImageDownloadStart')
log.info("Starting image download/validation process") log.info("Starting image download/validation process")
if not _download_image(): if not _download_image():
log.error("Image download failed") log.error("Image download failed")
# Update hypervisor annotation with failure status
_apply_dyanno_hypervisor_state('ImageDownloadFailed')
return { return {
'success': False, 'success': False,
'error': 'Image download failed', 'error': 'Image download failed',
@@ -702,8 +631,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
log.info("Setting up SSH keys") log.info("Setting up SSH keys")
if not _setup_ssh_keys(): if not _setup_ssh_keys():
log.error("SSH key setup failed") log.error("SSH key setup failed")
# Update hypervisor annotation with failure status
_apply_dyanno_hypervisor_state('SSHKeySetupFailed')
return { return {
'success': False, 'success': False,
'error': 'SSH key setup failed', 'error': 'SSH key setup failed',
@@ -728,12 +655,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
success = vm_result.get('success', False) success = vm_result.get('success', False)
log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED") log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED")
# Update hypervisor annotation with success status
if success:
_apply_dyanno_hypervisor_state('PreInit')
else:
_apply_dyanno_hypervisor_state('SetupFailed')
# If setup was successful and we have a minion_id, run highstate # If setup was successful and we have a minion_id, run highstate
if success and minion_id: if success and minion_id:
log.info("Running highstate on hypervisor %s", minion_id) log.info("Running highstate on hypervisor %s", minion_id)
+24 -5
@@ -15,7 +15,11 @@
'salt.minion-check', 'salt.minion-check',
'sensoroni', 'sensoroni',
'salt.lasthighstate', 'salt.lasthighstate',
'salt.minion', 'salt.minion'
] %}
{% set ssl_states = [
'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'schedule', 'schedule',
@@ -24,7 +28,7 @@
{% set manager_states = [ {% set manager_states = [
'salt.master', 'salt.master',
'ca.server', 'ca',
'registry', 'registry',
'manager', 'manager',
'nginx', 'nginx',
@@ -34,6 +38,8 @@
'hydra', 'hydra',
'elasticfleet', 'elasticfleet',
'elastic-fleet-package-registry', 'elastic-fleet-package-registry',
'idstools',
'suricata.manager',
'utility' 'utility'
] %} ] %}
@@ -71,24 +77,28 @@
{# Map role-specific states #} {# Map role-specific states #}
{% set role_states = { {% set role_states = {
'so-eval': ( 'so-eval': (
ssl_states +
manager_states + manager_states +
sensor_states + sensor_states +
elastic_stack_states | reject('equalto', 'logstash') | list + elastic_stack_states | reject('equalto', 'logstash') | list
['logstash.ssl']
), ),
'so-heavynode': ( 'so-heavynode': (
ssl_states +
sensor_states + sensor_states +
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx'] ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
), ),
'so-idh': ( 'so-idh': (
ssl_states +
['idh'] ['idh']
), ),
'so-import': ( 'so-import': (
ssl_states +
manager_states + manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list + sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager'] ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
), ),
'so-manager': ( 'so-manager': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -96,6 +106,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managerhype': ( 'so-managerhype': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] + ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
stig_states + stig_states +
@@ -103,6 +114,7 @@
elastic_stack_states elastic_stack_states
), ),
'so-managersearch': ( 'so-managersearch': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
stig_states + stig_states +
@@ -110,10 +122,12 @@
elastic_stack_states elastic_stack_states
), ),
'so-searchnode': ( 'so-searchnode': (
ssl_states +
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] + ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
stig_states stig_states
), ),
'so-standalone': ( 'so-standalone': (
ssl_states +
manager_states + manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
sensor_states + sensor_states +
@@ -122,24 +136,29 @@
elastic_stack_states elastic_stack_states
), ),
'so-sensor': ( 'so-sensor': (
ssl_states +
sensor_states + sensor_states +
['nginx'] + ['nginx'] +
stig_states stig_states
), ),
'so-fleet': ( 'so-fleet': (
ssl_states +
stig_states + stig_states +
['logstash', 'nginx', 'healthcheck', 'elasticfleet'] ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
), ),
'so-receiver': ( 'so-receiver': (
ssl_states +
kafka_states + kafka_states +
stig_states + stig_states +
['logstash', 'redis'] ['logstash', 'redis']
), ),
'so-hypervisor': ( 'so-hypervisor': (
ssl_states +
stig_states + stig_states +
['hypervisor', 'libvirt'] ['hypervisor', 'libvirt']
), ),
'so-desktop': ( 'so-desktop': (
['ssl', 'docker_clean', 'telegraf'] +
stig_states stig_states
) )
} %} } %}
+2 -4
@@ -1,12 +1,10 @@
{% macro remove_comments(bpfmerged, app) %} {% macro remove_comments(bpfmerged, app) %}
{# remove comments from the bpf #} {# remove comments from the bpf #}
{% set app_list = [] %}
{% for bpf in bpfmerged[app] %} {% for bpf in bpfmerged[app] %}
{% if not bpf.strip().startswith('#') %} {% if bpf.strip().startswith('#') %}
{% do app_list.append(bpf) %} {% do bpfmerged[app].pop(loop.index0) %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% do bpfmerged.update({app: app_list}) %}
{% endmacro %} {% endmacro %}
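The two columns show two ways for the macro to drop `#` comment lines: building a new filtered list versus popping indices while iterating. A plain-Python sketch of the difference (popping during iteration shifts indices and can skip entries, which is why the filter form is the safer of the two):

```python
# Why pop-while-iterating can leave comments behind.
bpf = ["# comment one", "ip and not port 22", "# comment two", "# comment three", "tcp port 443"]

popped = list(bpf)
for i, line in enumerate(popped):
    if line.strip().startswith("#"):
        popped.pop(i)   # index i now holds the next element, which the loop never revisits
print(popped)           # ['ip and not port 22', '# comment three', 'tcp port 443']

# Filtering into a new list removes every comment.
filtered = [line for line in bpf if not line.strip().startswith("#")]
print(filtered)         # ['ip and not port 22', 'tcp port 443']
```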
-11
@@ -1,7 +1,4 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set PCAP_BPF_STATUS = 0 %}
{% set STENO_BPF_COMPILED = "" %}
{% if GLOBALS.pcap_engine == "TRANSITION" %} {% if GLOBALS.pcap_engine == "TRANSITION" %}
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %} {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
{% else %} {% else %}
@@ -11,11 +8,3 @@
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }} {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %} {% set PCAPBPF = BPFMERGED.pcap %}
{% endif %} {% endif %}
{% if PCAPBPF %}
{% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% if PCAP_BPF_CALC['retcode'] == 0 %}
{% set PCAP_BPF_STATUS = 1 %}
{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
{% endif %}
{% endif %}
+2 -2
@@ -1,11 +1,11 @@
bpf: bpf:
pcap: pcap:
description: List of BPF filters to apply to the PCAP engine. description: List of BPF filters to apply to Stenographer.
multiline: True multiline: True
forcedType: "[]string" forcedType: "[]string"
helpLink: bpf.html helpLink: bpf.html
suricata: suricata:
description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata. description: List of BPF filters to apply to Suricata.
multiline: True multiline: True
forcedType: "[]string" forcedType: "[]string"
helpLink: bpf.html helpLink: bpf.html
-9
@@ -1,16 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% set SURICATA_BPF_STATUS = 0 %}
{% import 'bpf/macros.jinja' as MACROS %} {% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'suricata') }} {{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
{% set SURICATABPF = BPFMERGED.suricata %} {% set SURICATABPF = BPFMERGED.suricata %}
{% if SURICATABPF %}
{% set SURICATA_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% if SURICATA_BPF_CALC['retcode'] == 0 %}
{% set SURICATA_BPF_STATUS = 1 %}
{% endif %}
{% endif %}
-9
@@ -1,16 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% set ZEEK_BPF_STATUS = 0 %}
{% import 'bpf/macros.jinja' as MACROS %} {% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'zeek') }} {{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
{% set ZEEKBPF = BPFMERGED.zeek %} {% set ZEEKBPF = BPFMERGED.zeek %}
{% if ZEEKBPF %}
{% set ZEEK_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% if ZEEK_BPF_CALC['retcode'] == 0 %}
{% set ZEEK_BPF_STATUS = 1 %}
{% endif %}
{% endif %}
+4
@@ -0,0 +1,4 @@
pki_issued_certs:
file.directory:
- name: /etc/pki/issued_certs
- makedirs: True
+63 -3
@@ -3,10 +3,70 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
{% if GLOBALS.is_manager %} - ca.dirs
- ca.server
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 0
- backup: True
- replace: False
- require:
- sls: ca.dirs
- timeout: 30
- retry:
attempts: 5
interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %} {% endif %}
- ca.trustca
-3
@@ -1,3 +0,0 @@
{% set CA = {
'server': pillar.ca.server
}%}
+2 -30
@@ -1,35 +1,7 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one pki_private_key:
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
{% if setup_running%}
include:
- ssl.remove
remove_pki_private_key:
file.absent: file.absent:
- name: /etc/pki/ca.key - name: /etc/pki/ca.key
remove_pki_public_ca_crt: pki_public_ca_crt:
file.absent: file.absent:
- name: /etc/pki/ca.crt - name: /etc/pki/ca.crt
remove_trusttheca:
file.absent:
- name: /etc/pki/tls/certs/intca.crt
remove_pki_public_ca_crt_symlink:
file.absent:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
{% else %}
so-setup_not_running:
test.show_notification:
- text: "This state is reserved for usage during so-setup."
{% endif %}
-63
@@ -1,63 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- keysize: 4096
- passphrase:
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
pki_public_ca_crt:
x509.certificate_managed:
- name: /etc/pki/ca.crt
- signing_private_key: /etc/pki/ca.key
- CN: {{ GLOBALS.manager }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 7
- backup: True
- replace: False
- timeout: 30
- retry:
attempts: 5
interval: 30
pki_public_ca_crt_symlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/ca/files/ca.crt
- target: /etc/pki/ca.crt
- require:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
-26
@@ -1,26 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- docker
# Trust the CA
trusttheca:
file.managed:
- name: /etc/pki/tls/certs/intca.crt
- source: salt://ca/files/ca.crt
- watch_in:
- service: docker_running
- show_changes: False
- makedirs: True
{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
file.symlink:
- target: /etc/pki/tls/certs/intca.crt
- name: /etc/ssl/certs/intca.crt
{% endif %}
+1 -8
@@ -8,12 +8,5 @@
"base": "172.17.0.0/24", "base": "172.17.0.0/24",
"size": 24 "size": 24
} }
], ]
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Soft": 1048576,
"Hard": 1048576
}
}
} }
-21
@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}
nsm_total:
grains.present:
- name: nsm_total
- value: {{ nsm_total }}
{% else %}
nsm_missing:
test.succeed_without_changes:
- name: /nsm does not exist, skipping grain assignment
{% endif %}
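This state records the size of `/nsm` (via `df -BG`) as an `nsm_total` grain. A hedged Python equivalent of the measurement itself, using the standard library instead of shelling out; the rounding mimics `df -BG` (whole GiB, rounded up) but is only an approximation, and Salt actually persists the value with `grains.present`:

```python
# Approximate stand-in for the df -BG measurement behind the nsm_total grain.
import os
import shutil

if os.path.isdir("/nsm"):
    total_bytes = shutil.disk_usage("/nsm").total
    nsm_total = f"{-(-total_bytes // 1024**3)}G"   # ceiling division to whole GiB
    print({"nsm_total": nsm_total})
else:
    print("/nsm does not exist, skipping grain assignment")
```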
+1 -2
@@ -4,7 +4,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
- common.grains
- common.packages - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %} {% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state - manager.elasticsearch # needed for elastic_curl_config state
@@ -177,7 +176,7 @@ so-status_script:
- source: salt://common/tools/sbin/so-status - source: salt://common/tools/sbin/so-status
- mode: 755 - mode: 755
{% if GLOBALS.is_sensor %} {% if GLOBALS.role in GLOBALS.sensor_roles %}
# Add sensor cleanup # Add sensor cleanup
so-sensor-clean: so-sensor-clean:
cron.present: cron.present:
+1 -2
@@ -3,8 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% set soversion = salt['cp.get_file_str']('/etc/soversion') %} {% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% if '2.4' in soversion or soversion.startswith('3.') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %} {% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %} {% if SOC_GLOBAL.global.airgap %}
+3 -20
@@ -29,26 +29,9 @@ fi
interface="$1" interface="$1"
shift shift
tcpdump -i $interface -ddd $@ | tail -n+2 |
# Capture tcpdump output and exit code while read line; do
tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
tcpdump_exit=$?
if [ $tcpdump_exit -ne 0 ]; then
echo "$tcpdump_output" >&2
exit $tcpdump_exit
fi
# Process the output, skipping the first line
echo "$tcpdump_output" | tail -n+2 | while read -r line; do
cols=( $line ) cols=( $line )
printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}" printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
done done
# Check if the pipeline succeeded
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
exit 1
fi
echo "" echo ""
exit 0
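Both versions of `so-bpf-compile` perform the same transformation: take `tcpdump -ddd` output (one `code jt jf k` row per cBPF instruction after the count line) and pack each row with the widths the `printf "%04x%02x%02x%08x"` format produces. A plain-Python rendering with an abridged, illustrative input:

```python
# Packing tcpdump -ddd rows into the hex string so-bpf-compile emits.
ddd_output = """2
40 0 0 12
21 0 1 2048
"""

rows = ddd_output.strip().splitlines()[1:]          # tail -n+2: drop the count line
compiled = ""
for row in rows:
    code, jt, jf, k = (int(col) for col in row.split())
    compiled += f"{code:04x}{jt:02x}{jf:02x}{k:08x}"

print(compiled)  # 002800000000000c0015000100000800
```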
+1 -1
@@ -10,7 +10,7 @@
cat << EOF cat << EOF
so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see: so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
https://securityonion.net/docs/salt https://docs.securityonion.net/en/2.4/salt.html
EOF EOF
+20 -66
@@ -10,7 +10,7 @@
# and since this same logic is required during installation, it's included in this file. # and since this same logic is required during installation, it's included in this file.
DEFAULT_SALT_DIR=/opt/so/saltstack/default DEFAULT_SALT_DIR=/opt/so/saltstack/default
DOC_BASE_URL="https://securityonion.net/docs" DOC_BASE_URL="https://docs.securityonion.net/en/2.4"
if [ -z $NOROOT ]; then if [ -z $NOROOT ]; then
# Check for prerequisites # Check for prerequisites
@@ -220,22 +220,12 @@ compare_es_versions() {
} }
copy_new_files() { copy_new_files() {
# Define files to exclude from deletion (relative to their respective base directories)
local EXCLUDE_FILES=(
"salt/hypervisor/soc_hypervisor.yaml"
)
# Build rsync exclude arguments
local EXCLUDE_ARGS=()
for file in "${EXCLUDE_FILES[@]}"; do
EXCLUDE_ARGS+=(--exclude="$file")
done
# Copy new files over to the salt dir # Copy new files over to the salt dir
cd $UPDATE_DIR cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}" rsync -a salt $DEFAULT_SALT_DIR/ --delete
rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}" rsync -a pillar $DEFAULT_SALT_DIR/ --delete
chown -R socore:socore $DEFAULT_SALT_DIR/ chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp cd /tmp
} }
@@ -395,7 +385,7 @@ is_manager_node() {
} }
is_sensor_node() { is_sensor_node() {
# Check to see if this is a sensor node # Check to see if this is a sensor (forward) node
is_single_node_grid && return 0 is_single_node_grid && return 0
grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
} }
@@ -404,25 +394,6 @@ is_single_node_grid() {
grep "role: so-" /etc/salt/grains | grep -E "eval|standalone|import" &> /dev/null grep "role: so-" /etc/salt/grains | grep -E "eval|standalone|import" &> /dev/null
} }
initialize_elasticsearch_indices() {
local index_names=$1
local default_entry=${2:-'{"@timestamp":"0"}'}
for idx in $index_names; do
if ! so-elasticsearch-query "$idx" --fail --retry 3 --retry-delay 30 >/dev/null 2>&1; then
echo "Index does not already exist. Initializing $idx index."
if retry 3 10 "so-elasticsearch-query "$idx/_doc" -d '$default_entry' -XPOST --fail 2>/dev/null" '"successful":1'; then
echo "Successfully initialized $idx index."
else
echo "Failed to initialize $idx index after 3 attempts."
fi
else
echo "Index $idx already exists. No action needed."
fi
done
}
lookup_bond_interfaces() { lookup_bond_interfaces() {
cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g" cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g"
} }
@@ -470,7 +441,8 @@ lookup_grain() {
lookup_role() { lookup_role() {
id=$(lookup_grain id) id=$(lookup_grain id)
echo "${id##*_}" pieces=($(echo $id | tr '_' ' '))
echo ${pieces[1]}
} }
is_feature_enabled() { is_feature_enabled() {
@@ -574,38 +546,20 @@ run_check_net_err() {
wait_for_salt_minion() { wait_for_salt_minion() {
local minion="$1" local minion="$1"
local max_wait="${2:-30}" local timeout="${2:-5}"
local interval="${3:-2}" local logfile="${3:-'/dev/stdout'}"
local logfile="${4:-'/dev/stdout'}" retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local elapsed=0 local attempt=0
# each attempts would take about 15 seconds
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..." local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
while [ $elapsed -lt $max_wait ]; do attempt=$((attempt+1))
# Check if service is running if [[ $attempt -eq $maxAttempts ]]; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
if ! systemctl is-active --quiet salt-minion; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
sleep $interval
elapsed=$((elapsed + interval))
continue
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
# Check if minion responds to ping
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
return 0
fi
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
sleep $interval
elapsed=$((elapsed + interval))
done
echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
return 1 return 1
fi
sleep 10
done
return 0
} }
salt_minion_count() { salt_minion_count() {
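Both versions of `wait_for_salt_minion` reduce to the same shape: poll a readiness check until it passes or a deadline expires. A generic sketch of that loop; the file-based check is a stand-in for the real service/`test.ping` probes, not a Salt API:

```python
# Poll-until-ready pattern mirroring wait_for_salt_minion.
import os
import time

def wait_until(check, max_wait: int = 30, interval: int = 2) -> bool:
    elapsed = 0
    while elapsed < max_wait:
        if check():
            return True
        time.sleep(interval)
        elapsed += interval
    return False

ready = wait_until(lambda: os.path.exists("/tmp/minion-ready"))
print("ready" if ready else "timed out")
```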
+5
@@ -25,6 +25,7 @@ container_list() {
if [ $MANAGERCHECK == 'so-import' ]; then if [ $MANAGERCHECK == 'so-import' ]; then
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-elasticsearch" "so-elasticsearch"
"so-idstools"
"so-influxdb" "so-influxdb"
"so-kibana" "so-kibana"
"so-kratos" "so-kratos"
@@ -48,6 +49,7 @@ container_list() {
"so-elastic-fleet-package-registry" "so-elastic-fleet-package-registry"
"so-elasticsearch" "so-elasticsearch"
"so-idh" "so-idh"
"so-idstools"
"so-influxdb" "so-influxdb"
"so-kafka" "so-kafka"
"so-kibana" "so-kibana"
@@ -60,6 +62,8 @@ container_list() {
"so-soc" "so-soc"
"so-steno" "so-steno"
"so-strelka-backend" "so-strelka-backend"
"so-strelka-filestream"
"so-strelka-frontend"
"so-strelka-manager" "so-strelka-manager"
"so-suricata" "so-suricata"
"so-telegraf" "so-telegraf"
@@ -67,6 +71,7 @@ container_list() {
) )
else else
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-idstools"
"so-elasticsearch" "so-elasticsearch"
"so-logstash" "so-logstash"
"so-nginx" "so-nginx"
-14
@@ -129,8 +129,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -161,9 +159,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occuring BEFORE kratos index has rolled over in 2.4.210)
fi fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -226,9 +222,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
fi fi
RESULT=0 RESULT=0
@@ -275,13 +268,6 @@ for log_file in $(cat /tmp/log_check_files); do
tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
check_for_errors check_for_errors
done done
# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
if [[ -f /var/log/messages ]]; then
status "Checking log file /var/log/messages"
if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
RESULT=1
fi
fi
# Cleanup temp files # Cleanup temp files
rm -f /tmp/log_check_files rm -f /tmp/log_check_files
@@ -6,7 +6,7 @@
# Elastic License 2.0. # Elastic License 2.0.
source /usr/sbin/so-common source /usr/sbin/so-common
doc_desktop_url="$DOC_BASE_URL/desktop" doc_desktop_url="$DOC_BASE_URL/desktop.html"
{# we only want the script to install the desktop if it is OEL -#} {# we only want the script to install the desktop if it is OEL -#}
{% if grains.os == 'OEL' -%} {% if grains.os == 'OEL' -%}
+4 -4
@@ -85,7 +85,7 @@ function suricata() {
docker run --rm \ docker run --rm \
-v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \ -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
-v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \ -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
-v /opt/so/rules/suricata/:/etc/suricata/rules:ro \ -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
-v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \ -v ${NSM_PATH}/:/nsm/:rw \
-v "$PCAP:/input.pcap:ro" \ -v "$PCAP:/input.pcap:ro" \
@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
status "- assigning unique identifier to import: $HASH" status "- assigning unique identifier to import: $HASH"
pcap_data=$(pcapinfo "${PCAP}") pcap_data=$(pcapinfo "${PCAP}")
if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
status "- this PCAP file is invalid; skipping" status "- this PCAP file is invalid; skipping"
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1)) INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
else else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
HASHES="${HASHES} ${HASH}" HASHES="${HASHES} ${HASH}"
fi fi
START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}') START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}') END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
status "- found PCAP data spanning dates $START through $END" status "- found PCAP data spanning dates $START through $END"
# compare $START to $START_OLDEST # compare $START to $START_OLDEST
+16 -3
@@ -3,16 +3,29 @@
{# we only want this state to run it is CentOS #} {# we only want this state to run it is CentOS #}
{% if GLOBALS.os == 'OEL' %} {% if GLOBALS.os == 'OEL' %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set manager = GLOBALS.manager %}
{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set trusttheca_text = global_ca_text[0] %}
{% set ca_server = global_ca_server[0] %}
trusted_ca: trusted_ca:
file.managed: x509.pem_managed:
- name: /etc/pki/ca-trust/source/anchors/ca.crt - name: /etc/pki/ca-trust/source/anchors/ca.crt
- source: salt://ca/files/ca.crt - text: {{ trusttheca_text }}
update_ca_certs: update_ca_certs:
cmd.run: cmd.run:
- name: update-ca-trust - name: update-ca-trust
- onchanges: - onchanges:
- file: trusted_ca - x509: trusted_ca
{% else %} {% else %}
+5
@@ -24,6 +24,11 @@ docker:
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: [] extra_env: []
'so-idstools':
final_octet: 25
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-influxdb': 'so-influxdb':
final_octet: 26 final_octet: 26
port_bindings: port_bindings:
+17 -16
@@ -6,9 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# docker service requires the ca.crt # include ssl since docker service requires the intca
include: include:
- ca - ssl
dockergroup: dockergroup:
group.present: group.present:
@@ -20,20 +20,20 @@ dockergroup:
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 2.2.1-1~debian.12~bookworm - containerd.io: 1.7.21-1
- docker-ce: 5:29.2.1-1~debian.12~bookworm - docker-ce: 5:27.2.0-1~debian.12~bookworm
- docker-ce-cli: 5:29.2.1-1~debian.12~bookworm - docker-ce-cli: 5:27.2.0-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:29.2.1-1~debian.12~bookworm - docker-ce-rootless-extras: 5:27.2.0-1~debian.12~bookworm
- hold: True - hold: True
- update_holds: True - update_holds: True
{% elif grains.oscodename == 'jammy' %} {% elif grains.oscodename == 'jammy' %}
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 2.2.1-1~ubuntu.22.04~jammy - containerd.io: 1.7.21-1
- docker-ce: 5:29.2.1-1~ubuntu.22.04~jammy - docker-ce: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:29.2.1-1~ubuntu.22.04~jammy - docker-ce-cli: 5:27.2.0-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:29.2.1-1~ubuntu.22.04~jammy - docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.22.04~jammy
- hold: True - hold: True
- update_holds: True - update_holds: True
{% else %} {% else %}
@@ -51,10 +51,10 @@ dockerheldpackages:
dockerheldpackages: dockerheldpackages:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- containerd.io: 2.2.1-1.el9 - containerd.io: 1.7.21-3.1.el9
- docker-ce: 3:29.2.1-1.el9 - docker-ce: 3:27.2.0-1.el9
- docker-ce-cli: 1:29.2.1-1.el9 - docker-ce-cli: 1:27.2.0-1.el9
- docker-ce-rootless-extras: 29.2.1-1.el9 - docker-ce-rootless-extras: 27.2.0-1.el9
- hold: True - hold: True
- update_holds: True - update_holds: True
{% endif %} {% endif %}
@@ -89,9 +89,10 @@ docker_running:
- enable: True - enable: True
- watch: - watch:
- file: docker_daemon - file: docker_daemon
- x509: trusttheca
- require: - require:
- file: docker_daemon - file: docker_daemon
- file: trusttheca - x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
@@ -117,4 +118,4 @@ sos_docker_net:
com.docker.network.bridge.enable_ip_masquerade: 'true' com.docker.network.bridge.enable_ip_masquerade: 'true'
com.docker.network.bridge.enable_icc: 'true' com.docker.network.bridge.enable_icc: 'true'
com.docker.network.bridge.host_binding_ipv4: '0.0.0.0' com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
- unless: ip l | grep sobridge - unless: 'docker network ls | grep sobridge'
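The last line changes how the sos_docker_net state decides the bridge network already exists: one side asks the kernel (`ip l`), the other asks Docker's network list. A quick comparison of the two checks, assuming a Debian/Ubuntu node for the package query:

```
# Sketch only: the two existence checks are not equivalent.
docker network ls --format '{{.Name}}' | grep -x sobridge   # Docker's record of the custom network
ip link show | grep sobridge                                 # only matches if the kernel bridge shares the name
# And to confirm the engine versions pinned above are actually held (Debian/Ubuntu):
dpkg -l 'docker-ce*' containerd.io | grep ^hi                # "hi" = on hold and installed
```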
+1
@@ -41,6 +41,7 @@ docker:
forcedType: "[]string" forcedType: "[]string"
so-elastic-fleet: *dockerOptions so-elastic-fleet: *dockerOptions
so-elasticsearch: *dockerOptions so-elasticsearch: *dockerOptions
so-idstools: *dockerOptions
so-influxdb: *dockerOptions so-influxdb: *dockerOptions
so-kibana: *dockerOptions so-kibana: *dockerOptions
so-kratos: *dockerOptions so-kratos: *dockerOptions
+1 -1
@@ -60,7 +60,7 @@ so-elastalert:
- watch: - watch:
- file: elastaconf - file: elastaconf
- onlyif: - onlyif:
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #} - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
delete_so-elastalert_so-status.disabled: delete_so-elastalert_so-status.disabled:
file.uncomment: file.uncomment:
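The `onlyif` guard flips between requiring Elasticsearch major version 9 and 8. The guard is a one-liner against the cluster root endpoint; broken out for clarity (so-elasticsearch-query and the jq slice come straight from the state above):

```
# Sketch only: what the onlyif guard evaluates.
so-elasticsearch-query / | jq -r '.version.number'            # e.g. 8.14.3
MAJOR=$(so-elasticsearch-query / | jq -r '.version.number[0:1]')
echo "major version: $MAJOR"                                   # the state runs only when this matches the guard
```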
-3
@@ -9,7 +9,6 @@
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ca
- elasticagent.config - elasticagent.config
- elasticagent.sostatus - elasticagent.sostatus
@@ -56,10 +55,8 @@ so-elastic-agent:
{% endif %} {% endif %}
- require: - require:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
- watch: - watch:
- file: create-elastic-agent-config - file: create-elastic-agent-config
- file: trusttheca
delete_so-elastic-agent_so-status.disabled: delete_so-elastic-agent_so-status.disabled:
file.uncomment: file.uncomment:
+310 -159
@@ -3,7 +3,7 @@
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
id: aea1ba80-1065-11ee-a369-97538913b6a9 id: aea1ba80-1065-11ee-a369-97538913b6a9
revision: 4 revision: 1
outputs: outputs:
default: default:
type: elasticsearch type: elasticsearch
@@ -22,133 +22,242 @@ agent:
metrics: false metrics: false
features: {} features: {}
inputs: inputs:
- id: filestream-filestream-85820eb0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-fefef78c-422f-4cfa-8abf-4cd1b9428f62
name: import-suricata-logs name: import-evtx-logs
revision: 3 revision: 2
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 85820eb0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: fefef78c-422f-4cfa-8abf-4cd1b9428f62
streams: streams:
- id: filestream-filestream.generic-85820eb0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-fefef78c-422f-4cfa-8abf-4cd1b9428f62
data_stream: data_stream:
dataset: import dataset: import
paths: paths:
- /nsm/import/*/suricata/eve*.json - /nsm/import/*/evtx/*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
ignore_older: 72h
clean_inactive: -1
parsers: null
processors:
- add_fields:
target: event
fields:
category: network
module: suricata
imported: true
- dissect:
tokenizer: /nsm/import/%{import.id}/suricata/%{import.file}
field: log.file.path
target_prefix: ''
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-86b4e960-25ef-11f0-a18d-1b26f69b8310
name: import-zeek-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 86b4e960-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-86b4e960-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: import
paths:
- /nsm/import/*/zeek/logs/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
processors: processors:
- dissect: - dissect:
tokenizer: /nsm/import/%{import.id}/zeek/logs/%{import.file}
field: log.file.path field: log.file.path
tokenizer: '/nsm/import/%{import.id}/evtx/%{import.file}'
target_prefix: '' target_prefix: ''
- script:
lang: javascript
source: |
function process(event) {
var pl = event.Get("import.file").slice(0,-4);
event.Put("@metadata.pipeline", "zeek." + pl);
}
- add_fields:
target: event
fields:
category: network
module: zeek
imported: true
- add_tags:
tags: ics
when:
regexp:
import.file: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null
prospector.scanner.fingerprint.enabled: false
- id: filestream-filestream-91741240-25ef-11f0-a18d-1b26f69b8310
name: soc-sensoroni-logs
revision: 3
type: filestream
use_output: default
meta:
package:
name: filestream
version:
data_stream:
namespace: so
package_policy_id: 91741240-25ef-11f0-a18d-1b26f69b8310
streams:
- id: filestream-filestream.generic-91741240-25ef-11f0-a18d-1b26f69b8310
data_stream:
dataset: soc
paths:
- /opt/so/log/sensoroni/sensoroni.log
pipeline: common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors:
- decode_json_fields: - decode_json_fields:
fields: fields:
- message - message
target: sensoroni target: ''
- drop_fields:
ignore_missing: true
fields:
- host
- add_fields:
fields:
dataset: system.security
type: logs
namespace: default
target: data_stream
- add_fields:
fields:
dataset: system.security
module: system
imported: true
target: event
- then:
- add_fields:
fields:
dataset: windows.sysmon_operational
target: data_stream
- add_fields:
fields:
dataset: windows.sysmon_operational
module: windows
imported: true
target: event
if:
equals:
winlog.channel: Microsoft-Windows-Sysmon/Operational
- then:
- add_fields:
fields:
dataset: system.application
target: data_stream
- add_fields:
fields:
dataset: system.application
target: event
if:
equals:
winlog.channel: Application
- then:
- add_fields:
fields:
dataset: system.system
target: data_stream
- add_fields:
fields:
dataset: system.system
target: event
if:
equals:
winlog.channel: System
- then:
- add_fields:
fields:
dataset: windows.powershell_operational
target: data_stream
- add_fields:
fields:
dataset: windows.powershell_operational
module: windows
target: event
if:
equals:
winlog.channel: Microsoft-Windows-PowerShell/Operational
tags:
- import
- id: logfile-redis-fc98c947-7d17-4861-a318-7ad075f6d1b0
name: redis-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: redis
version:
data_stream:
namespace: default
package_policy_id: fc98c947-7d17-4861-a318-7ad075f6d1b0
streams:
- id: logfile-redis.log-fc98c947-7d17-4861-a318-7ad075f6d1b0
data_stream:
dataset: redis.log
type: logs
exclude_files:
- .gz$
paths:
- /opt/so/log/redis/redis.log
tags:
- redis-log
exclude_lines:
- '^\s+[\-`(''.|_]'
- id: logfile-logs-3b56803d-5ade-4c93-b25e-9b37182f66b8
name: import-suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 3b56803d-5ade-4c93-b25e-9b37182f66b8
streams:
- id: logfile-log.log-3b56803d-5ade-4c93-b25e-9b37182f66b8
data_stream:
dataset: import
pipeline: suricata.common
paths:
- /nsm/import/*/suricata/eve*.json
processors:
- add_fields:
fields:
module: suricata
imported: true
category: network
target: event
- dissect:
field: log.file.path
tokenizer: '/nsm/import/%{import.id}/suricata/%{import.file}'
target_prefix: ''
- id: logfile-logs-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
name: soc-server-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: c327e1a3-1ebe-449c-a8eb-f6f35032e69d
streams:
- id: logfile-log.log-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sensoroni-server.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true process_array: true
max_depth: 2 max_depth: 2
add_error_key: true
- add_fields:
target: event
fields: fields:
- message
target: soc
- add_fields:
fields:
module: soc
dataset_temp: server
category: host category: host
target: event
- rename:
ignore_missing: true
fields:
- from: soc.fields.sourceIp
to: source.ip
- from: soc.fields.status
to: http.response.status_code
- from: soc.fields.method
to: http.request.method
- from: soc.fields.path
to: url.path
- from: soc.message
to: event.action
- from: soc.level
to: log.level
tags:
- so-soc
- id: logfile-logs-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
name: soc-sensoroni-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
streams:
- id: logfile-log.log-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/sensoroni/sensoroni.log
processors:
- decode_json_fields:
add_error_key: true
process_array: true
max_depth: 2
fields:
- message
target: sensoroni
- add_fields:
fields:
module: soc module: soc
dataset_temp: sensoroni dataset_temp: sensoroni
category: host
target: event
- rename: - rename:
ignore_missing: true
fields: fields:
- from: sensoroni.fields.sourceIp - from: sensoroni.fields.sourceIp
to: source.ip to: source.ip
@@ -162,100 +271,141 @@ inputs:
to: event.action to: event.action
- from: sensoroni.level - from: sensoroni.level
to: log.level to: log.level
ignore_missing: true - id: logfile-logs-df0d7f2c-221f-433b-b18b-d1cf83250515
file_identity.native: null name: soc-salt-relay-logs
prospector.scanner.fingerprint.enabled: false revision: 2
- id: filestream-filestream-976e3900-25ef-11f0-a18d-1b26f69b8310 type: logfile
name: suricata-logs
revision: 3
type: filestream
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 976e3900-25ef-11f0-a18d-1b26f69b8310 package_policy_id: df0d7f2c-221f-433b-b18b-d1cf83250515
streams: streams:
- id: filestream-filestream.generic-976e3900-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-df0d7f2c-221f-433b-b18b-d1cf83250515
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/salt-relay.log
processors:
- dissect:
field: message
tokenizer: '%{soc.ts} | %{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: salt_relay
category: host
target: event
tags:
- so-soc
- id: logfile-logs-74bd2366-fe52-493c-bddc-843a017fc4d0
name: soc-auth-sync-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: 74bd2366-fe52-493c-bddc-843a017fc4d0
streams:
- id: logfile-log.log-74bd2366-fe52-493c-bddc-843a017fc4d0
data_stream:
dataset: soc
pipeline: common
paths:
- /opt/so/log/soc/sync.log
processors:
- dissect:
field: message
tokenizer: '%{event.action}'
target_prefix: ''
- add_fields:
fields:
module: soc
dataset_temp: auth_sync
category: host
target: event
tags:
- so-soc
- id: logfile-logs-d151d9bf-ff2a-4529-9520-c99244bc0253
name: suricata-logs
revision: 2
type: logfile
use_output: default
meta:
package:
name: log
version:
data_stream:
namespace: so
package_policy_id: d151d9bf-ff2a-4529-9520-c99244bc0253
streams:
- id: logfile-log.log-d151d9bf-ff2a-4529-9520-c99244bc0253
data_stream: data_stream:
dataset: suricata dataset: suricata
pipeline: suricata.common
paths: paths:
- /nsm/suricata/eve*.json - /nsm/suricata/eve*.json
pipeline: suricata.common
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors: processors:
- add_fields: - add_fields:
target: event
fields: fields:
category: network
module: suricata module: suricata
file_identity.native: null category: network
prospector.scanner.fingerprint.enabled: false target: event
- id: filestream-filestream-95091fe0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-31f94d05-ae75-40ee-b9c5-0e0356eff327
name: strelka-logs name: strelka-logs
revision: 3 revision: 2
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 95091fe0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: 31f94d05-ae75-40ee-b9c5-0e0356eff327
streams: streams:
- id: filestream-filestream.generic-95091fe0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-31f94d05-ae75-40ee-b9c5-0e0356eff327
data_stream: data_stream:
dataset: strelka dataset: strelka
pipeline: strelka.file
paths: paths:
- /nsm/strelka/log/strelka.log - /nsm/strelka/log/strelka.log
pipeline: strelka.file
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- \.gz$
clean_inactive: -1
parsers: null
processors: processors:
- add_fields: - add_fields:
target: event
fields: fields:
category: file
module: strelka module: strelka
file_identity.native: null category: file
prospector.scanner.fingerprint.enabled: false target: event
- id: filestream-filestream-9f309ca0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-logs-6197fe84-9b58-4d9b-8464-3d517f28808d
name: zeek-logs name: zeek-logs
revision: 2 revision: 1
type: filestream type: logfile
use_output: default use_output: default
meta: meta:
package: package:
name: filestream name: log
version: version:
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 9f309ca0-25ef-11f0-a18d-1b26f69b8310 package_policy_id: 6197fe84-9b58-4d9b-8464-3d517f28808d
streams: streams:
- id: filestream-filestream.generic-9f309ca0-25ef-11f0-a18d-1b26f69b8310 - id: logfile-log.log-6197fe84-9b58-4d9b-8464-3d517f28808d
data_stream: data_stream:
dataset: zeek dataset: zeek
paths: paths:
- /nsm/zeek/logs/current/*.log - /nsm/zeek/logs/current/*.log
prospector.scanner.recursive_glob: true
prospector.scanner.exclude_files:
- >-
(broker|capture_loss|cluster|conn-summary|console|ecat_arp_info|known_certs|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout).log$
clean_inactive: -1
parsers: null
processors: processors:
- dissect: - dissect:
tokenizer: /nsm/zeek/logs/current/%{pipeline}.log tokenizer: '/nsm/zeek/logs/current/%{pipeline}.log'
field: log.file.path field: log.file.path
trim_chars: .log trim_chars: .log
target_prefix: '' target_prefix: ''
@@ -277,17 +427,18 @@ inputs:
regexp: regexp:
pipeline: >- pipeline: >-
^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm* ^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*
file_identity.native: null exclude_files:
prospector.scanner.fingerprint.enabled: false - >-
broker|capture_loss|cluster|ecat_arp_info|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout.log$
- id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60 - id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-udp-514 name: syslog-udp-514
revision: 4 revision: 3
type: udp type: udp
use_output: default use_output: default
meta: meta:
package: package:
name: udp name: udp
version: version: 1.10.0
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60 package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60
@@ -307,13 +458,13 @@ inputs:
- syslog - syslog
- id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60 - id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60
name: syslog-tcp-514 name: syslog-tcp-514
revision: 4 revision: 3
type: tcp type: tcp
use_output: default use_output: default
meta: meta:
package: package:
name: tcp name: tcp
version: version: 1.10.0
data_stream: data_stream:
namespace: so namespace: so
package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60 package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60
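Throughout this policy file the Zeek inputs rely on a dissect processor plus a small JavaScript step to turn the log filename into an ingest pipeline name. A shell sketch of the same transformation, with an example path:

```
# Sketch only: what the dissect tokenizer + script processor derive from a Zeek log path.
LOGPATH=/nsm/zeek/logs/current/conn.log        # example path
PIPELINE=$(basename "$LOGPATH" .log)           # tokenizer + trim_chars ".log" -> "conn"
echo "@metadata.pipeline = zeek.$PIPELINE"     # the script processor adds the "zeek." prefix
```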
-34
@@ -1,34 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{# advanced config_yaml options for elasticfleet logstash output #}
{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %}
{% set ADV_OUTPUT_LOGSTASH = {} %}
{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %}
{% if v != "" and v is not none %}
{% if k == 'queue_mem_events' %}
{# rename queue_mem_events queue.mem.events #}
{% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %}
{% elif k == 'loadbalance' %}
{% if v %}
{# only include loadbalance config when its True #}
{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
{% endif %}
{% else %}
{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
{% endif %}
{% endif %}
{% endfor %}
{% set LOGSTASH_CONFIG_YAML_RAW = [] %}
{% if ADV_OUTPUT_LOGSTASH %}
{% for k, v in ADV_OUTPUT_LOGSTASH.items() %}
{% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %}
{% endfor %}
{% endif %}
{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %}
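The template above (present on only one side of this compare) flattens the logstash output pillar into the Fleet output's advanced YAML, renaming queue_mem_events to queue.mem.events and dropping loadbalance unless it is true. A hedged way to see the pillar it consumed; the pillar path is inferred from the defaults file later in this diff:

```
# Sketch only: inspect the (assumed) pillar path the template read.
salt-call pillar.get elasticfleet:config:outputs:logstash
# e.g. {worker: 4, queue_mem_events: 8192, loadbalance: False} would have rendered as:
#   worker: 4
#   queue.mem.events: 8192        # renamed key; loadbalance omitted because it is False
```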
-4
@@ -11,7 +11,6 @@
include: include:
- elasticfleet.artifact_registry - elasticfleet.artifact_registry
- elasticfleet.ssl
# Add EA Group # Add EA Group
elasticfleetgroup: elasticfleetgroup:
@@ -96,9 +95,6 @@ soresourcesrepoclone:
- rev: 'main' - rev: 'main'
- depth: 1 - depth: 1
- force_reset: True - force_reset: True
- retry:
attempts: 3
interval: 10
{% endif %} {% endif %}
elasticdefendconfdir: elasticdefendconfdir:
+1 -8
@@ -10,19 +10,12 @@ elasticfleet:
grid_enrollment: '' grid_enrollment: ''
defend_filters: defend_filters:
enable_auto_configuration: False enable_auto_configuration: False
outputs:
logstash:
bulk_max_size: ''
worker: ''
queue_mem_events: ''
timeout: ''
loadbalance: False
compression_level: ''
subscription_integrations: False subscription_integrations: False
auto_upgrade_integrations: False auto_upgrade_integrations: False
logging: logging:
zeek: zeek:
excluded: excluded:
- analyzer
- broker - broker
- capture_loss - capture_loss
- cluster - cluster
+1 -28
@@ -13,10 +13,9 @@
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %} {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
include: include:
- ca
- logstash.ssl
- elasticfleet.config - elasticfleet.config
- elasticfleet.sostatus - elasticfleet.sostatus
- ssl
{% if grains.role not in ['so-fleet'] %} {% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -33,17 +32,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
- retry: - retry:
attempts: 4 attempts: 4
interval: 30 interval: 30
{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update --certs
- retry:
attempts: 4
interval: 30
- onchanges:
- x509: etc_elasticfleet_logstash_crt
- x509: elasticfleet_kafka_crt
{% endif %} {% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection # If enabled, automatically update Fleet Server URLs & ES Connection
@@ -134,11 +122,6 @@ so-elastic-fleet:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt
- require:
- file: trusttheca
- x509: etc_elasticfleet_key - x509: etc_elasticfleet_key
- x509: etc_elasticfleet_crt - x509: etc_elasticfleet_crt
{% endif %} {% endif %}
@@ -152,18 +135,12 @@ so-elastic-fleet-package-statefile:
so-elastic-fleet-package-upgrade: so-elastic-fleet-package-upgrade:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade - name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges: - onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt - file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations: so-elastic-fleet-integrations:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load - name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade: so-elastic-agent-grid-upgrade:
cmd.run: cmd.run:
@@ -175,11 +152,7 @@ so-elastic-agent-grid-upgrade:
so-elastic-fleet-integration-upgrade: so-elastic-fleet-integration-upgrade:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade - name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations: so-elastic-fleet-addon-integrations:
cmd.run: cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
@@ -2,7 +2,7 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-zeek-logs", "name": "import-zeek-logs",
@@ -10,31 +10,19 @@
"description": "Zeek Import logs", "description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/zeek/logs/*.log" "/nsm/import/*/zeek/logs/*.log"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
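In these integration templates the Beats processors are YAML escaped into a single JSON string, which is hard to review in place. Once the Jinja is rendered, something like the jq below prints it readably; the filename is hypothetical:

```
# Sketch only: pull the escaped processors YAML out of a rendered integration JSON.
jq -r '.. | .processors? // empty' import-zeek-logs.json
```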
@@ -11,47 +11,31 @@
{%- endif -%} {%- endif -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/kratos/kratos.log" "/opt/so/log/kratos/kratos.log"
], ],
"data_stream.dataset": "kratos", "data_stream.dataset": "kratos",
"pipeline": "kratos", "tags": ["so-kratos"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
{%- if valid_identities -%} {%- if valid_identities -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%} {%- else -%}
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%} {%- endif -%}
"tags": [ "custom": "pipeline: kratos"
"so-kratos"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -59,3 +43,4 @@
}, },
"force": true "force": true
} }
@@ -2,38 +2,28 @@
{%- raw -%} {%- raw -%}
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"id": "zeek-logs",
"name": "zeek-logs", "name": "zeek-logs",
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/zeek/logs/current/*.log" "/nsm/zeek/logs/current/*.log"
], ],
"data_stream.dataset": "zeek", "data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"clean_inactive": -1, "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "9.0.2", "version": "8.18.1",
"requires_root": true "requires_root": true
}, },
"enabled": true, "enabled": true,
@@ -40,7 +40,7 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/elasticsearch/*.json" "/opt/so/log/elasticsearch/*.log"
] ]
} }
}, },
@@ -1,43 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs", "description": "Hydra logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/hydra/hydra.log"
], ],
"data_stream.dataset": "hydra", "data_stream.dataset": "hydra",
"pipeline": "hydra", "tags": ["so-hydra"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"exclude_files": [ "custom": "pipeline: hydra"
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
"tags": [
"so-hydra"
],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -45,5 +28,3 @@
}, },
"force": true "force": true
} }
@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "idh-logs", "name": "idh-logs",
"namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/idh/opencanary.log" "/nsm/idh/opencanary.log"
], ],
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,42 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/evtx/*.json" "/nsm/import/*/evtx/*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "custom": "",
"exclude_files": [ "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.5.4\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.5.4\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.5.4\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
], ]
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
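The EVTX import processors first tag every event as system.security and then re-route it by winlog.channel. Roughly, the routing looks like this (sketch only, channels and datasets taken from the processors string above):

```
# Sketch only: dataset routing applied by the per-channel add_fields processors.
channel='Microsoft-Windows-Sysmon/Operational'                  # example value
case "$channel" in
  Microsoft-Windows-Sysmon/Operational)     dataset=windows.sysmon_operational ;;
  Microsoft-Windows-PowerShell/Operational) dataset=windows.powershell_operational ;;
  Application)                              dataset=system.application ;;
  System)                                   dataset=system.system ;;
  *)                                        dataset=system.security ;;   # the default set first
esac
echo "event.dataset=$dataset"
```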
@@ -1,41 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/suricata/eve*.json" "/nsm/import/*/suricata/eve*.json"
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"ignore_older": "72h", "custom": "pipeline: suricata.common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -15,7 +15,7 @@
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/redis/redis-server.log" "/opt/so/log/redis/redis.log"
], ],
"tags": [ "tags": [
"redis-log" "redis-log"
@@ -1,17 +1,18 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "rita-logs", "name": "rita-logs",
"namespace": "so",
"description": "RITA Logs", "description": "RITA Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
@@ -19,28 +20,15 @@
"/nsm/rita/exploded-dns.csv", "/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv" "/nsm/rita/long-connections.csv"
], ],
"data_stream.dataset": "rita", "exclude_files": [],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h", "ignore_older": "72h",
"clean_inactive": -1, "data_stream.dataset": "rita",
"harvester_limit": 0, "tags": [],
"fingerprint": false, "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"fingerprint_offset": 0, "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
"fingerprint_length": "64", }
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
},
"force": true
} }
@@ -1,41 +1,29 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "so-ip-mappings", "name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings", "description": "IP Description mappings",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so", "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/custom-mappings/ip-descriptions.csv" "/nsm/custom-mappings/ip-descriptions.csv"
], ],
"data_stream.dataset": "hostnamemappings", "data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"tags": [ "tags": [
"so-ip-mappings" "so-ip-mappings"
], ],
"recursive_glob": true, "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
"clean_inactive": -1, "custom": ""
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -43,3 +31,5 @@
}, },
"force": true "force": true
} }
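The so-ip-mappings input decodes a two-column CSV, keeps only rows whose first column is a valid IPv4 address, and fingerprints the IP into the document _id so updated descriptions overwrite earlier ones. A sample row, using the path from the template:

```
# Sketch only: column 0 -> so.ip_address, column 1 -> so.description.
echo '10.10.10.5,Primary domain controller' >> /nsm/custom-mappings/ip-descriptions.csv
```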
@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sync.log" "/opt/so/log/soc/sync.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync", "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"tags": [], "custom": "pipeline: common"
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,44 +1,31 @@
{ {
"policy_id": "so-grid-nodes_general",
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-detections-logs", "name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs", "description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log" "/opt/so/log/soc/detections_runtime-status_yara.log"
], ],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "tags": [
"so-soc" "so-soc"
], ],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"ignore_older": "72h", "custom": "pipeline: common"
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/salt-relay.log" "/opt/so/log/soc/salt-relay.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay", "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,44 +1,30 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/sensoroni/sensoroni.log" "/opt/so/log/sensoroni/sensoroni.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"clean_inactive": -1, "custom": "pipeline: common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
} }
}, },
"force": true "force": true
} }
@@ -1,42 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sensoroni-server.log" "/opt/so/log/soc/sensoroni-server.log"
], ],
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "tags": ["so-soc"],
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"tags": [ "custom": "pipeline: common"
"so-soc"
],
"recursive_glob": true,
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "strelka-logs", "name": "strelka-logs",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"namespace": "so", "namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes_general",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/strelka/log/strelka.log" "/nsm/strelka/log/strelka.log"
], ],
"data_stream.dataset": "strelka", "data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"clean_inactive": -1, "custom": "pipeline: strelka.file"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
@@ -1,40 +1,26 @@
{ {
"package": { "package": {
"name": "filestream", "name": "log",
"version": "" "version": ""
}, },
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"namespace": "so",
"inputs": { "inputs": {
"filestream-filestream": { "logs-logfile": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.generic": { "log.logs": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/suricata/eve*.json" "/nsm/suricata/eve*.json"
], ],
"data_stream.dataset": "suricata", "data_stream.dataset": "suricata",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"tags": [], "tags": [],
"recursive_glob": true, "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"clean_inactive": -1, "custom": "pipeline: suricata.common"
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": []
} }
} }
} }
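Note: the integration policy files above all follow the same migration pattern: the `filestream` package with a `pipeline` variable on one side becomes the `log` package with the pipeline carried in a `custom` string on the other. A minimal sketch of that transformation as a jq filter is below (illustrative only; `policy.json` is a hypothetical file name, and the real files in this diff are edited directly rather than generated):

```
# Sketch (illustrative): the per-file change from the filestream package to the log package,
# expressed as a jq filter over one of these policy documents ("policy.json" is hypothetical).
jq '.package.name = "log"
    | .inputs |= with_entries(.key = "logs-logfile")
    | .inputs["logs-logfile"].streams |= with_entries(.key = "log.logs")
    | .inputs["logs-logfile"].streams["log.logs"].vars |=
        (. + {"custom": ("pipeline: " + .pipeline)} | del(.pipeline))' policy.json
```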
@@ -1,107 +0,0 @@
{
"package": {
"name": "elasticsearch",
"version": ""
},
"name": "elasticsearch-grid-nodes_heavy",
"namespace": "default",
"description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes_heavy",
"inputs": {
"elasticsearch-logfile": {
"enabled": true,
"streams": {
"elasticsearch.audit": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_audit.json"
]
}
},
"elasticsearch.deprecation": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_deprecation.json"
]
}
},
"elasticsearch.gc": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/gc.log.[0-9]*",
"/var/log/elasticsearch/gc.log"
]
}
},
"elasticsearch.server": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/elasticsearch/*.json"
]
}
},
"elasticsearch.slowlog": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_index_search_slowlog.json",
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
]
}
}
}
},
"elasticsearch-elasticsearch/metrics": {
"enabled": false,
"vars": {
"hosts": [
"http://localhost:9200"
],
"scope": "node"
},
"streams": {
"elasticsearch.stack_monitoring.ccr": {
"enabled": false
},
"elasticsearch.stack_monitoring.cluster_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.enrich": {
"enabled": false
},
"elasticsearch.stack_monitoring.index": {
"enabled": false
},
"elasticsearch.stack_monitoring.index_recovery": {
"enabled": false,
"vars": {
"active.only": true
}
},
"elasticsearch.stack_monitoring.index_summary": {
"enabled": false
},
"elasticsearch.stack_monitoring.ml_job": {
"enabled": false
},
"elasticsearch.stack_monitoring.node": {
"enabled": false
},
"elasticsearch.stack_monitoring.node_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.pending_tasks": {
"enabled": false
},
"elasticsearch.stack_monitoring.shard": {
"enabled": false
}
}
}
},
"force": true
}
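The JSON documents above (both the per-integration policies and the removed `elasticsearch-grid-nodes_heavy` policy) are loaded into Fleet through the Kibana API by the `so-elastic-fleet-*` scripts later in this diff. A minimal sketch of that call, assuming the manager-side `curl.config` credentials file used throughout those scripts and an illustrative policy file name:

```
# Sketch: create a Fleet package policy from one of these JSON files (file name is illustrative).
curl -sK /opt/so/conf/elasticsearch/curl.config \
  -L "localhost:5601/api/fleet/package_policies" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -XPOST -d @/opt/so/conf/elastic-fleet/integrations/grid-nodes_general/example.json
```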
+17 -23
View File
@@ -2,32 +2,26 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%} {%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{% if grains.role == 'so-heavynode' %} {%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% endif %}
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %} {% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %} {% if not AGENT_STATUS %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}
pull_agent_installer:
file.managed:
- name: /opt/so/so-elastic-agent_linux_amd64
- source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- mode: 755
- makedirs: True
{% if grains.role not in ['so-heavynode'] %}
run_installer: run_installer:
cmd.run: cmd.script:
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- cwd: /opt/so - cwd: /opt/so
- retry: - args: -token={{ GRIDNODETOKENGENERAL }}
attempts: 3 - retry: True
interval: 20 {% else %}
run_installer:
cleanup_agent_installer: cmd.script:
file.absent: - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
- name: /opt/so/so-elastic-agent_linux_amd64 - cwd: /opt/so
- args: -token={{ GRIDNODETOKENHEAVY }}
- retry: True
{% endif %}
{% endif %} {% endif %}
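Both sides of this state ultimately run the same installer with an enrollment token; they differ in how the binary reaches the node (a managed file plus `cmd.run` versus `cmd.script` served straight from the Salt fileserver) and in how the heavy-node token is selected. A hedged sketch of the equivalent manual invocation, with a placeholder token value:

```
# Sketch: manual equivalent of the enrollment performed by the state above.
cd /opt/so
TOKEN="<grid-enrollment-token>"   # placeholder; normally rendered from pillar
./so-elastic-agent_linux_amd64 -token="$TOKEN" -force
```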
@@ -21,7 +21,6 @@
'azure_application_insights.app_state': 'azure.app_state', 'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing', 'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function', 'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset', 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm', 'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance', 'azure_metrics.container_instance': 'azure.container_instance',
@@ -122,9 +121,6 @@
"phases": { "phases": {
"cold": { "cold": {
"actions": { "actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0} "set_priority": {"priority": 0}
}, },
"min_age": "60d" "min_age": "60d"
@@ -141,31 +137,12 @@
"max_age": "30d", "max_age": "30d",
"max_primary_shard_size": "50gb" "max_primary_shard_size": "50gb"
}, },
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100} "set_priority": {"priority": 100}
}, },
"min_age": "0ms" "min_age": "0ms"
}, },
"warm": { "warm": {
"actions": { "actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50} "set_priority": {"priority": 50}
}, },
"min_age": "30d" "min_age": "30d"
-40
View File
@@ -50,46 +50,6 @@ elasticfleet:
global: True global: True
forcedType: bool forcedType: bool
helpLink: elastic-fleet.html helpLink: elastic-fleet.html
outputs:
logstash:
bulk_max_size:
description: The maximum number of events to bulk in a single Logstash request.
global: True
forcedType: int
advanced: True
helpLink: elastic-fleet.html
worker:
description: The number of workers per configured host publishing events.
global: True
forcedType: int
advanced: true
helpLink: elastic-fleet.html
queue_mem_events:
title: queued events
description: The number of events the queue can store. This value should be evenly divisible by the smaller of 'bulk_max_size' to avoid sending partial batches to the output.
global: True
forcedType: int
advanced: True
helpLink: elastic-fleet.html
timeout:
description: The number of seconds to wait for responses from the Logstash server before timing out. Eg 30s
regex: ^[0-9]+s$
advanced: True
global: True
helpLink: elastic-fleet.html
loadbalance:
description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
forcedType: bool
advanced: True
global: True
helpLink: elastic-fleet.html
compression_level:
description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
regex: ^[1-9]$
forcedType: int
advanced: True
global: True
helpLink: elastic-fleet.html
server: server:
custom_fqdn: custom_fqdn:
description: Custom FQDN for Agents to connect to. One per line. description: Custom FQDN for Agents to connect to. One per line.
-186
View File
@@ -1,186 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_agent_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ CA.server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
chownelasticfleetagentkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ CA.server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 947
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 947
- group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
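The file removed above generated the Fleet server, agent, and Kafka certificates with `x509.certificate_managed`. A quick way to spot-check such a managed pair, assuming OpenSSL 1.1.1 or later on the node (a verification sketch, not part of the state itself):

```
# Sketch: inspect the Fleet server certificate and confirm it matches its private key.
openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -subject -dates -ext subjectAltName
diff <(openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -pubkey | sha256sum) \
     <(openssl pkey -in /etc/pki/elasticfleet-server.key -pubout | sha256sum) \
  && echo "key matches certificate"
```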
@@ -23,13 +23,6 @@ fi
# Define a banner to separate sections # Define a banner to separate sections
banner="=========================================================================" banner="========================================================================="
fleet_api() {
local QUERYPATH=$1
shift
curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
elastic_fleet_integration_check() { elastic_fleet_integration_check() {
AGENT_POLICY=$1 AGENT_POLICY=$1
@@ -46,9 +39,7 @@ elastic_fleet_integration_create() {
JSON_STRING=$1 JSON_STRING=$1
if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
@@ -65,10 +56,7 @@ elastic_fleet_integration_remove() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
return 1
fi
} }
elastic_fleet_integration_update() { elastic_fleet_integration_update() {
@@ -77,9 +65,7 @@ elastic_fleet_integration_update() {
JSON_STRING=$2 JSON_STRING=$2
if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_integration_policy_upgrade() { elastic_fleet_integration_policy_upgrade() {
@@ -91,83 +77,78 @@ elastic_fleet_integration_policy_upgrade() {
'{"packagePolicyIds":[$INTEGRATIONID]}' '{"packagePolicyIds":[$INTEGRATIONID]}'
) )
if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_package_version_check() { elastic_fleet_package_version_check() {
PACKAGE=$1 PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
if output=$(fleet_api "epm/packages/$PACKAGE"); then
echo "$output" | jq -r '.item.version'
else
echo "Error: Failed to get current package version for '$PACKAGE'"
return 1
fi
} }
elastic_fleet_package_latest_version_check() { elastic_fleet_package_latest_version_check() {
PACKAGE=$1 PACKAGE=$1
if output=$(fleet_api "epm/packages/$PACKAGE"); then if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" --fail); then
if version=$(jq -e -r '.item.latestVersion' <<< $output); then if version=$(jq -e -r '.item.latestVersion' <<< $output); then
echo "$version" echo "$version"
fi fi
else else
echo "Error: Failed to get latest version for '$PACKAGE'" echo "Error: Failed to get latest version for $PACKAGE"
return 1
fi fi
} }
elastic_fleet_package_install() { elastic_fleet_package_install() {
PKG=$1 PKG=$1
VERSION=$2 VERSION=$2
if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
return 1
fi
} }
elastic_fleet_bulk_package_install() { elastic_fleet_bulk_package_install() {
BULK_PKG_LIST=$1 BULK_PKG_LIST=$1
if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
return 1 }
fi
elastic_fleet_package_is_installed() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
} }
elastic_fleet_installed_packages() { elastic_fleet_installed_packages() {
if ! fleet_api "epm/packages/installed?perPage=500"; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
return 1
fi
} }
elastic_fleet_agent_policy_ids() { elastic_fleet_agent_policy_ids() {
if output=$(fleet_api "agent_policies"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
echo "$output" | jq -r .items[].id if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve agent policies." echo "Error: Failed to retrieve agent policies."
return 1 exit 1
fi
}
elastic_fleet_agent_policy_names() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
if [ $? -ne 0 ]; then
echo "Error: Failed to retrieve agent policies."
exit 1
fi fi
} }
elastic_fleet_integration_policy_names() { elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1 AGENT_POLICY=$1
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
echo "$output" | jq -r .item.package_policies[].name if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'." echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
elastic_fleet_integration_policy_package_name() { elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name' if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'." echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
@@ -175,32 +156,32 @@ elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" --fail); then
if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< $output); then
echo "$version" echo "$version"
fi fi
else else
echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'" echo "Error: Failed to retrieve agent policy $AGENT_POLICY"
return 1 exit 1
fi fi
} }
elastic_fleet_integration_id() { elastic_fleet_integration_id() {
AGENT_POLICY=$1 AGENT_POLICY=$1
INTEGRATION=$2 INTEGRATION=$2
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id' if [ $? -ne 0 ]; then
else
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'." echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
return 1 exit 1
fi fi
} }
elastic_fleet_integration_policy_dryrun_upgrade() { elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1 INTEGRATION_ID=$1
if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
if [ $? -ne 0 ]; then
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'." echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
return 1 exit 1
fi fi
} }
@@ -219,8 +200,15 @@ elastic_fleet_policy_create() {
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}' '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
) )
# Create Fleet Policy # Create Fleet Policy
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
return 1
fi
} }
elastic_fleet_policy_update() {
POLICYID=$1
JSON_STRING=$2
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
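On the side of the diff that keeps the `fleet_api` wrapper, every helper above reduces to a thin call around a single retried, fail-fast curl invocation. A usage sketch, assuming that version of `so-elastic-fleet-common` is sourced:

```
# Sketch: list agent policy IDs through the shared wrapper instead of a hand-rolled curl.
. /usr/sbin/so-elastic-fleet-common
fleet_api "agent_policies" | jq -r '.items[].id'
```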
@@ -8,7 +8,6 @@
. /usr/sbin/so-elastic-fleet-common . /usr/sbin/so-elastic-fleet-common
ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy # Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do do
@@ -16,20 +15,9 @@ do
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n" printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
ERROR=true
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
ERROR=true
continue
fi
fi fi
done done
if [[ "$ERROR" == "true" ]]; then
exit 1
fi
@@ -25,9 +25,5 @@ for POLICYNAME in $POLICY; do
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json) .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON # Now update the integration policy using the modified JSON
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
# exit 1 on failure to update fleet integration policies, let salt handle retries
echo "Failed to update $POLICYNAME.."
exit 1
fi
done done
@@ -13,101 +13,79 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
/usr/sbin/so-elastic-fleet-package-upgrade /usr/sbin/so-elastic-fleet-package-upgrade
# Second, update Fleet Server policies # Second, update Fleet Server policies
/usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration separately # Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints # Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n" printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
# Grid Nodes - General # Grid Nodes - General
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_general/*.json
do
printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n" printf "\n\nGrid Nodes Policy_General - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Grid Nodes - Heavy # Grid Nodes - Heavy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/grid-nodes_heavy/*.json
do
printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n" printf "\n\nGrid Nodes Policy_Heavy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION" elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then if [ "$NAME" != "elasticsearch-logs" ]; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}" elastic_fleet_integration_create "@$INTEGRATION"
RETURN_CODE=1
continue
fi fi
fi fi
done done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Fleet Server - Optional integrations # Fleet Server - Optional integrations
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json; do for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7` FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n" printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION" elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n" printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
else else
printf "\n\nIntegration does not exist - Creating integration\n" printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then elastic_fleet_integration_create "@$INTEGRATION"
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
RETURN_CODE=1
continue
fi
fi fi
fi fi
fi fi
done done
# Only create the state file if all policies were created/updated successfully
if [[ "$RETURN_CODE" != "1" ]]; then if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt touch /opt/so/state/eaintegrations.txt
fi fi
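One behavioral difference in this script is when the state file is written: one side touches `/opt/so/state/eaintegrations.txt` only if every create/update succeeded, so a failed run is retried on the next highstate, while the other writes it unconditionally at the end. A minimal sketch of that gating pattern (the step functions are hypothetical placeholders):

```
# Sketch: only record success when all steps worked, so Salt retries the script otherwise.
RETURN_CODE=0
step_one() { true; }   # hypothetical placeholder steps
step_two() { true; }
step_one || RETURN_CODE=1
step_two || RETURN_CODE=1
if [[ "$RETURN_CODE" != "1" ]]; then
    touch /opt/so/state/eaintegrations.txt
fi
```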
@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi fi
# Get current list of Grid Node Agents that need to be upgraded # Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
@@ -26,7 +26,7 @@ function update_es_urls() {
} }
# Get current list of Fleet Elasticsearch URLs # Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
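Both versions of these scripts validate the Fleet API response before acting on it by extracting a field that must be present. A compact sketch of that guard, using the outputs endpoint shown above:

```
# Sketch: bail out early if the Fleet outputs query did not return usable data.
RAW_JSON=$(curl -sK /opt/so/conf/elasticsearch/curl.config \
  -L 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --fail 2>/dev/null)
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [[ -z "$CHECKSUM" || "$CHECKSUM" == "null" ]]; then
    echo "Failed to query for current Elasticsearch outputs..."
    exit 1
fi
```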
@@ -24,18 +24,12 @@ fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %}) default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
ERROR=false
for AGENT_POLICY in $agent_policies; do for AGENT_POLICY in $agent_policies; do
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
# this script upgrades default integration packages, exit 1 and let salt handle retrying
exit 1
fi
for INTEGRATION in $integrations; do for INTEGRATION in $integrations; do
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
# Get package name so we know what package to look for when checking the current and latest available version # Get package name so we know what package to look for when checking the current and latest available version
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
exit 1
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %} {%- if not AUTO_UPGRADE_INTEGRATIONS %}
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %} {%- endif %}
@@ -54,9 +48,7 @@ for AGENT_POLICY in $agent_policies; do
fi fi
# Get integration ID # Get integration ID
if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
exit 1
fi
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
# Dry run of the upgrade # Dry run of the upgrade
@@ -64,23 +56,20 @@ for AGENT_POLICY in $agent_policies; do
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..." echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
echo "Upgrading $INTEGRATION..." echo "Upgrading $INTEGRATION..."
echo "Starting dry run..." echo "Starting dry run..."
if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
exit 1
fi
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors) DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
# If no errors with dry run, proceed with actual upgrade # If no errors with dry run, proceed with actual upgrade
if [[ "$DRYRUN_ERRORS" == "false" ]]; then if [[ "$DRYRUN_ERRORS" == "false" ]]; then
echo "No errors detected. Proceeding with upgrade..." echo "No errors detected. Proceeding with upgrade..."
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
if [ $? -ne 0 ]; then
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'." echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
ERROR=true exit 1
continue
fi fi
else else
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..." echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
ERROR=true exit 1
continue
fi fi
fi fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %} {%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -89,7 +78,4 @@ for AGENT_POLICY in $agent_policies; do
fi fi
done done
done done
if [[ "$ERROR" == "true" ]]; then
exit 1
fi
echo echo
@@ -62,17 +62,9 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.l
in_use_integrations=() in_use_integrations=()
for AGENT_POLICY in $agent_policies; do for AGENT_POLICY in $agent_policies; do
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
# skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
echo "Skipping $AGENT_POLICY.. "
continue
fi
for INTEGRATION in $integrations; do for INTEGRATION in $integrations; do
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
echo "Not adding $INTEGRATION, couldn't get package name"
continue
fi
# non-default integrations that are in-use in any policy # non-default integrations that are in-use in any policy
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
in_use_integrations+=("$PACKAGE_NAME") in_use_integrations+=("$PACKAGE_NAME")
@@ -86,7 +78,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list) latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do while read -r package; do
# get package details # get package details
@@ -168,11 +160,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
for file in "${pkg_filename}_"*.json; do for file in "${pkg_filename}_"*.json; do
[ -e "$file" ] || continue [ -e "$file" ] || continue
if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT
# integrations loaded by this script are non-essential and shouldn't cause exit; skip them for now and let the next highstate run retry
echo "Failed to complete a chunk of bulk package installs -- $file "
continue
fi
done done
# cleanup any temp files for chunked package install # cleanup any temp files for chunked package install
rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
@@ -180,9 +168,8 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
echo "Elastic integrations don't appear to need installation/updating..." echo "Elastic integrations don't appear to need installation/updating..."
fi fi
# Write out file for generating index/component/ilm templates # Write out file for generating index/component/ilm templates
if latest_installed_package_list=$(elastic_fleet_installed_packages); then latest_installed_package_list=$(elastic_fleet_installed_packages)
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
fi
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
# Refresh installed component template list # Refresh installed component template list
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.') latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
@@ -3,36 +3,11 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0. # this file except in compliance with the Elastic License 2.0.
{%- from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} {% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %}
. /usr/sbin/so-common . /usr/sbin/so-common
FORCE_UPDATE=false
UPDATE_CERTS=false
LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}"
LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar"
while [[ $# -gt 0 ]]; do
case $1 in
-f|--force)
FORCE_UPDATE=true
shift
;;
-c| --certs)
UPDATE_CERTS=true
FORCE_UPDATE=true
shift
;;
*)
echo "Unknown option $1"
echo "Usage: $0 [-f|--force] [-c|--certs]"
exit 1
;;
esac
done
# Only run on Managers # Only run on Managers
if ! is_manager_node; then if ! is_manager_node; then
printf "Not a Manager Node... Exiting" printf "Not a Manager Node... Exiting"
@@ -40,53 +15,8 @@ if ! is_manager_node; then
fi fi
function update_logstash_outputs() { function update_logstash_outputs() {
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then # Generate updated JSON payload
SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl') JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
# Revert escaped \\n to \n for jq
LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML")
if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Reuse existing secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--argjson SECRETS "$SECRETS" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else
# Update certs, creating new secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
fi
else
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Reuse existing ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}')
else
# Update ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
fi
fi
fi
# Update Logstash Outputs # Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
@@ -95,11 +25,7 @@ function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl') SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Update policy when fleet has secrets enabled # Update policy when fleet has secrets enabled
JSON_STRING=$(jq -n \ JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \ --arg UPDATEDLIST "$NEW_LIST_JSON" \
@@ -107,30 +33,11 @@ function update_kafka_outputs() {
--argjson SECRETS "$SECRETS" \ --argjson SECRETS "$SECRETS" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}') '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
else else
# Update certs, creating new secret
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKACA "$KAFKACA" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}')
fi
else
if [[ "$UPDATE_CERTS" != "true" ]]; then
# Update policy when fleet has secrets disabled or policy hasn't been force updated # Update policy when fleet has secrets disabled or policy hasn't been force updated
JSON_STRING=$(jq -n \ JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \ --arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \ --argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
else
# Update ssl config
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKACA "$KAFKACA" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}')
fi
fi fi
# Update Kafka outputs # Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
@@ -142,7 +49,7 @@ function update_kafka_outputs() {
{% if GLOBALS.pipeline == "KAFKA" %} {% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs # Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -153,7 +60,7 @@ function update_kafka_outputs() {
# Get the current list of kafka outputs & hash them # Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=() declare -a NEW_LIST=()
@@ -168,7 +75,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %} {% else %}
# Get current list of Logstash Outputs # Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -176,19 +83,10 @@ function update_kafka_outputs() {
printf "Failed to query for current Logstash Outputs..." printf "Failed to query for current Logstash Outputs..."
exit 1 exit 1
fi fi
# logstash adv config - compare pillar to last state file value
if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then
PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE")
if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then
echo "Logstash pillar config has changed - forcing update"
FORCE_UPDATE=true
fi
echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE"
fi
# Get the current list of Logstash outputs & hash them # Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=() declare -a NEW_LIST=()
@@ -237,10 +135,10 @@ function update_kafka_outputs() {
# Sort & hash the new list of Logstash Outputs # Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}') NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
# Compare the current & new list of outputs - if different, update the Logstash outputs # Compare the current & new list of outputs - if different, update the Logstash outputs
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n" printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
@@ -10,16 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..." echo "Setting up {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# Packages loaded by this script should never fail to install and are required before an installation of SO can be considered successful
echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
exit 1
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo
@@ -10,15 +10,8 @@
{%- for PACKAGE in SUPPORTED_PACKAGES %} {%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..." echo "Upgrading {{ PACKAGE }} package..."
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
# Exit 1 on failure to upgrade a default package and allow Salt to handle retries
echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
exit 1
fi
else
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
fi
echo echo
{%- endfor %} {%- endfor %}
echo echo
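Both package loops follow the same check-then-fail shape in their stricter form: capture the version lookup in an if, then guard the install and exit non-zero so Salt can retry the state later. A compact sketch of the pattern, using hypothetical helpers in place of the real elastic_fleet_package_* functions:
```
# Hypothetical stand-ins for elastic_fleet_package_version_check / _install.
get_version() { echo "1.2.3"; }
install_pkg() { return 0; }

PACKAGE="endpoint"
if VERSION=$(get_version "$PACKAGE"); then
    if ! install_pkg "$PACKAGE" "$VERSION"; then
        echo "ERROR: Failed to install $PACKAGE $VERSION" >&2
        exit 1   # non-zero exit lets Salt re-run the state later
    fi
else
    echo "ERROR: Failed to get version information for $PACKAGE" >&2
    exit 1
fi
```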
@@ -23,17 +23,18 @@ if [[ "$RETURN_CODE" != "0" ]]; then
exit 1 exit 1
fi fi
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest) ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in "${ALIASES[@]}"; do for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias # Get all concrete indices from alias
if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
# Delete all resolved indices # Delete all resolved indices
for INDX in ${INDXS}; do for INDX in ${INDXS}
do
status "Deleting $INDX" status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done done
fi
done done
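For reference, the cleanup loop above leans on the _resolve/index API to expand each Fleet alias into its concrete indices before deleting them. A hedged stand-alone example of that resolve step, using the same curl config path as the script:
```
# Resolve one alias to its backing indices and print them, skipping on failure.
ALIAS=".fleet-agents"
if RAW=$(curl -sK /opt/so/conf/kibana/curl.config -k -L \
        -H "Content-Type: application/json" \
        "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
    echo "$RAW" | jq -r '.aliases[].indices[]'
else
    echo "Could not resolve ${ALIAS}; skipping" >&2
fi
```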
# Restarting Kibana... # Restarting Kibana...
@@ -50,61 +51,22 @@ if [[ "$RETURN_CODE" != "0" ]]; then
fi fi
printf "\n### Create ES Token ###\n" printf "\n### Create ES Token ###\n"
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
else
echo -e "\nFailed to create ES token..."
exit 1
fi
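Most Kibana Fleet calls on one side of this diff go through a fleet_api helper whose definition is not part of this hunk. A minimal sketch of what such a wrapper might look like (an assumption, not the actual helper), reusing the curl config file referenced throughout this script:
```
# Hypothetical wrapper: call a Kibana Fleet API path, fail on HTTP errors.
fleet_api() {
    local path=$1; shift
    curl -sK /opt/so/conf/elasticsearch/curl.config -L --fail \
        "localhost:5601/api/fleet/${path}" "$@"
}

# Example usage mirroring the service token call above:
# fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'
```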
### Create Outputs, Fleet Policy and Fleet URLs ### ### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output # Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n" printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]') ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$(jq -n \ JSON_STRING=$( jq -n \
--arg ESCACRT "$ESCACRT" \ --arg ESCACRT "$ESCACRT" \
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}') '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
echo -e "\nFailed to create so-elasticsearch_manager policy..."
exit 1
fi
printf "\n\n" printf "\n\n"
# so-manager_elasticsearch should exist and be disabled. Now update it before checking that it's the only default output
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
echo -e "\n failed to update so-manager_elasticsearch"
exit 1
fi
# At this point there should only be two outputs: fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"
# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-manager_elasticsearch are set as the active default output for logs / metrics, resulting in logs not ingesting on import/eval nodes
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
# Check that fleet-default-output isn't configured as a default for anything ( both variables return false )
if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
else
echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
exit 1
fi
else
# fleet-default-output is created automatically by Fleet when it starts and should exist on any installation type
echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
exit 1
fi
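The verification block extracts booleans with jq -er; with -e, jq's exit status tracks the extracted value (false or null exit non-zero), which is handy when these lookups are wrapped in conditionals. For example:
```
# jq -e derives the exit status from the last output value.
echo '{"item":{"is_default":true}}'  | jq -er '.item.is_default'; echo "exit=$?"  # true,  exit=0
echo '{"item":{"is_default":false}}' | jq -er '.item.is_default'; echo "exit=$?"  # false, exit=1
echo '{}'                            | jq -er '.item.is_default'; echo "exit=$?"  # null,  exit=1
```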
# Create the Manager Fleet Server Host Agent Policy # Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output # This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n" printf "Create Manager Fleet Server Policy...\n"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
echo -e "\n Failed to create Manager fleet server policy..."
exit 1
fi
# Modify the default integration policy to update the policy_id with the correct naming # Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" ' UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
@@ -112,10 +74,7 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json) .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Add the Fleet Server Integration to the new Fleet Policy # Add the Fleet Server Integration to the new Fleet Policy
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
echo -e "\nFailed to create Fleet server integration for Manager.."
exit 1
fi
# Now we can create the Logstash Output and set it to be the default Output # Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n" printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
@@ -127,12 +86,9 @@ JSON_STRING=$( jq -n \
--arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCA "$LOGSTASHCA" \ --arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}' '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
) )
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to create logstash fleet output"
exit 1
fi
printf "\n\n" printf "\n\n"
{%- endif %} {%- endif %}
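A note on the output payloads above: they are assembled with jq -n --arg so multi-line PEM material and other values are JSON-escaped safely rather than spliced into a string by hand. A small self-contained example of the same technique with a dummy value:
```
# Dummy multi-line value standing in for a PEM certificate.
CERT=$'-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----'

JSON_STRING=$(jq -n \
  --arg CRT "$CERT" \
  '{"name":"example-output","ssl":{"certificate":$CRT}}')

# The newlines survive as \n inside a valid JSON string.
echo "$JSON_STRING" | jq .
```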
@@ -150,10 +106,7 @@ else
fi fi
## This array replaces whatever URLs are currently configured ## This array replaces whatever URLs are currently configured
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to add manager fleet URL"
exit 1
fi
printf "\n\n" printf "\n\n"
### Create Policies & Associated Integration Configuration ### ### Create Policies & Associated Integration Configuration ###
@@ -164,22 +117,13 @@ printf "\n\n"
/usr/sbin/so-elasticsearch-templates-load /usr/sbin/so-elasticsearch-templates-load
# Initial Endpoints Policy # Initial Endpoints Policy
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
echo -e "\nFailed to create endpoints-initial policy..."
exit 1
fi
# Grid Nodes - General Policy # Grid Nodes - General Policy
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
echo -e "\nFailed to create so-grid-nodes_general policy..."
exit 1
fi
# Grid Nodes - Heavy Node Policy # Grid Nodes - Heavy Node Policy
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
echo -e "\nFailed to create so-grid-nodes_heavy policy..."
exit 1
fi
# Load Integrations for default policies # Load Integrations for default policies
so-elastic-fleet-integration-policy-load so-elastic-fleet-integration-policy-load
@@ -191,34 +135,14 @@ JSON_STRING=$( jq -n \
'{"name":$NAME,"host":$URL,"is_default":true}' '{"name":$NAME,"host":$URL,"is_default":true}'
) )
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
echo -e "\nFailed to update Elastic Agent artifact URL"
exit 1
fi
### Finalization ### ### Finalization ###
# Query for Enrollment Tokens for default policies # Query for Enrollment Tokens for default policies
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key') GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
echo -e "\nFailed to query for Endpoints enrollment token"
exit 1
fi
if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else
echo -e "\nFailed to query for Grid nodes - General enrollment token"
exit 1
fi
if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
exit 1
fi
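The three enrollment-token lookups each hit the same enrollment_api_keys endpoint. A hedged alternative sketch that queries once and filters per policy with jq (policy IDs taken from the calls above; the real script may intentionally re-query):
```
# Fetch the enrollment key list once, then select per policy.
KEYS=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L --fail \
    "localhost:5601/api/fleet/enrollment_api_keys" \
    -H 'kbn-xsrf: true' -H 'Content-Type: application/json') || { echo "Failed to query enrollment tokens" >&2; exit 1; }

ENDPOINTSENROLLMENTOKEN=$(jq -r '.list[] | select(.policy_id == "endpoints-initial") | .api_key' <<< "$KEYS")
GRIDNODESENROLLMENTOKENGENERAL=$(jq -r '.list[] | select(.policy_id == "so-grid-nodes_general") | .api_key' <<< "$KEYS")
GRIDNODESENROLLMENTOKENHEAVY=$(jq -r '.list[] | select(.policy_id == "so-grid-nodes_heavy") | .api_key' <<< "$KEYS")
```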
# Store needed data in minion pillar # Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
@@ -241,11 +165,9 @@ printf '%s\n'\
"" >> "$global_pillar_file" "" >> "$global_pillar_file"
# Call Elastic-Fleet Salt State # Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node # Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0 exit 0
@@ -23,7 +23,7 @@ function update_fleet_urls() {
} }
# Get current list of Fleet Server URLs # Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null) RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
# Check to make sure that the server responded with good data - else, bail from script # Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -34,11 +34,6 @@ if [[ "$RETURN_CODE" != "0" ]]; then
exit 1 exit 1
fi fi
if [[ ! -f /etc/pki/elasticfleet-kafka.crt || ! -f /etc/pki/elasticfleet-kafka.key ]]; then
echo -e "\nKafka certificates not found, can't setup Elastic Fleet output policy for Kafka...\n"
exit 1
fi
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
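One side of this hunk bails out early when the Fleet Kafka certificate or key is missing. If you also want to confirm the pair actually belong together before building the output, a common check (hypothetical here, not part of the script) is to compare public-key digests:
```
# Compare the public key embedded in the cert with the one derived from the key.
CRT_PUB=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt -noout -pubkey | sha256sum)
KEY_PUB=$(openssl rsa  -in /etc/pki/elasticfleet-kafka.key -pubout 2>/dev/null | sha256sum)
if [ "$CRT_PUB" = "$KEY_PUB" ]; then
    echo "Kafka cert/key pair match"
else
    echo "Kafka cert/key pair MISMATCH" >&2
fi
```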
@@ -52,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg KAFKACA "$KAFKACA" \ --arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -72,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\ --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \ --argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n" echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
+2 -2
View File
@@ -26,14 +26,14 @@ catrustscript:
GLOBALS: {{ GLOBALS }} GLOBALS: {{ GLOBALS }}
{% endif %} {% endif %}
elasticsearch_cacerts: cacertz:
file.managed: file.managed:
- name: /opt/so/conf/ca/cacerts - name: /opt/so/conf/ca/cacerts
- source: salt://elasticsearch/cacerts - source: salt://elasticsearch/cacerts
- user: 939 - user: 939
- group: 939 - group: 939
elasticsearch_capems: capemz:
file.managed: file.managed:
- name: /opt/so/conf/ca/tls-ca-bundle.pem - name: /opt/so/conf/ca/tls-ca-bundle.pem
- source: salt://elasticsearch/tls-ca-bundle.pem - source: salt://elasticsearch/tls-ca-bundle.pem
+5
View File
@@ -5,6 +5,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
include:
- ssl
- elasticsearch.ca
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
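One side of this hunk adds ssl and elasticsearch.ca to the include list so those states are rendered along with the Elasticsearch config. A quick, hedged way to see what a given SLS pulls in when rendered on the manager:
```
# Render the SLS and show the resulting state IDs (run on the manager).
salt-call state.show_sls elasticsearch.config
```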
+47 -112
View File
@@ -1,13 +1,11 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 9.0.8 version: 8.18.6
index_clean: true index_clean: true
config: config:
action: action:
destructive_requires_name: true destructive_requires_name: true
cluster: cluster:
logsdb:
enabled: false
routing: routing:
allocation: allocation:
disk: disk:
@@ -74,8 +72,6 @@ elasticsearch:
actions: actions:
set_priority: set_priority:
priority: 0 priority: 0
allocate:
number_of_replicas: ""
min_age: 60d min_age: 60d
delete: delete:
actions: actions:
@@ -88,25 +84,11 @@ elasticsearch:
max_primary_shard_size: 50gb max_primary_shard_size: 50gb
set_priority: set_priority:
priority: 100 priority: 100
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 0ms min_age: 0ms
warm: warm:
actions: actions:
set_priority: set_priority:
priority: 50 priority: 50
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
allocate:
number_of_replicas: ""
min_age: 30d min_age: 30d
so-case: so-case:
index_sorting: false index_sorting: false
@@ -263,6 +245,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-detection: so-detection:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -301,19 +284,6 @@ elasticsearch:
hot: hot:
actions: {} actions: {}
min_age: 0ms min_age: 0ms
sos-backup:
index_sorting: false
index_template:
composed_of: []
ignore_missing_component_templates: []
index_patterns:
- sos-backup-*
priority: 501
template:
settings:
index:
number_of_replicas: 0
number_of_shards: 1
so-assistant-chat: so-assistant-chat:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -614,6 +584,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-import: so-import:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -693,6 +664,7 @@ elasticsearch:
match_mapping_type: string match_mapping_type: string
settings: settings:
index: index:
final_pipeline: .fleet_final_pipeline-1
lifecycle: lifecycle:
name: so-import-logs name: so-import-logs
mapping: mapping:
@@ -858,13 +830,53 @@ elasticsearch:
composed_of: composed_of:
- agent-mappings - agent-mappings
- dtc-agent-mappings - dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- container-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings - event-mappings
- dtc-event-mappings
- file-mappings - file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings - host-mappings
- dtc-host-mappings - dtc-host-mappings
- http-mappings - http-mappings
- dtc-http-mappings - dtc-http-mappings
- log-mappings
- metadata-mappings - metadata-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- threat-mappings
- tls-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
data_stream: data_stream:
@@ -920,6 +932,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-hydra: so-hydra:
close: 30 close: 30
delete: 365 delete: 365
@@ -1030,6 +1043,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-lists: so-lists:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -1113,8 +1127,6 @@ elasticsearch:
actions: actions:
set_priority: set_priority:
priority: 0 priority: 0
allocate:
number_of_replicas: ""
min_age: 60d min_age: 60d
delete: delete:
actions: actions:
@@ -1127,25 +1139,11 @@ elasticsearch:
max_primary_shard_size: 50gb max_primary_shard_size: 50gb
set_priority: set_priority:
priority: 100 priority: 100
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 0ms min_age: 0ms
warm: warm:
actions: actions:
set_priority: set_priority:
priority: 50 priority: 50
allocate:
number_of_replicas: ""
forcemerge:
max_num_segments: ""
shrink:
max_primary_shard_size: ""
method: COUNT
number_of_shards: ""
min_age: 30d min_age: 30d
so-logs-detections_x_alerts: so-logs-detections_x_alerts:
index_sorting: false index_sorting: false
@@ -1993,70 +1991,6 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-elasticsearch_x_server:
index_sorting: false
index_template:
composed_of:
- logs-elasticsearch.server@package
- logs-elasticsearch.server@custom
- so-fleet_integrations.ip_mappings-1
- so-fleet_globals-1
- so-fleet_agent_id_verification-1
data_stream:
allow_custom_routing: false
hidden: false
ignore_missing_component_templates:
- logs-elasticsearch.server@custom
index_patterns:
- logs-elasticsearch.server-*
priority: 501
template:
mappings:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
settings:
index:
lifecycle:
name: so-logs-elasticsearch.server-logs
mapping:
total_fields:
limit: 5000
number_of_replicas: 0
sort:
field: '@timestamp'
order: desc
policy:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 60d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-endpoint_x_actions: so-logs-endpoint_x_actions:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -3125,6 +3059,7 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
warm: 7
so-logs-system_x_application: so-logs-system_x_application:
index_sorting: false index_sorting: false
index_template: index_template:
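Several ILM policy defaults above gain or lose warm/cold tuning (warm: 7, allocate, forcemerge, shrink). One hedged way to confirm what actually landed in the cluster is to read a policy back out of Elasticsearch, for example the import-logs policy named above:
```
# Show the phases of the rendered ILM policy for import logs.
curl -sK /opt/so/conf/kibana/curl.config -k -L \
    "https://localhost:9200/_ilm/policy/so-import-logs" | jq '.["so-import-logs"].policy.phases'
```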
+13 -11
View File
@@ -14,9 +14,6 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include: include:
- ca
- elasticsearch.ca
- elasticsearch.ssl
- elasticsearch.config - elasticsearch.config
- elasticsearch.sostatus - elasticsearch.sostatus
@@ -64,7 +61,11 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
@@ -81,21 +82,22 @@ so-elasticsearch:
{% endfor %} {% endfor %}
{% endif %} {% endif %}
- watch: - watch:
- file: trusttheca - file: cacertz
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- require: - require:
- file: trusttheca
- x509: elasticsearch_crt
- x509: elasticsearch_key
- file: elasticsearch_cacerts
- file: esyml - file: esyml
- file: eslog4jfile - file: eslog4jfile
- file: nsmesdir - file: nsmesdir
- file: eslogdir - file: eslogdir
- file: cacertz
- x509: /etc/pki/elasticsearch.crt
- x509: /etc/pki/elasticsearch.key
- file: elasticp12perms - file: elasticp12perms
{% if GLOBALS.is_manager %}
- x509: pki_public_ca_crt
{% else %}
- x509: trusttheca
{% endif %}
- cmd: auth_users_roles_inode - cmd: auth_users_roles_inode
- cmd: auth_users_inode - cmd: auth_users_inode
+20 -202
View File
@@ -6,207 +6,25 @@
}, },
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n", "description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [ "processors": [
{ { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
"set": { { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
"ignore_failure": true, { "split": { "if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
"field": "event.module", { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
"value": "elastic_agent" { "set": { "if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'", "field":"event.module", "value":"{{ datastream_dataset_temp.0 }}", "ignore_failure":true, "ignore_empty_value":true, "description":"Fix EA network packet capture" } },
} { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
}, { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}", "allow_duplicates": false } },
{ { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
"split": { { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
"field": "event.dataset", { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
"separator": "\\.", { "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
"target_field": "module_temp" { "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
} { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
}, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
"split": { { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
"if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", {"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
"field": "data_stream.dataset", {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
"separator": "\\.", { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
"target_field": "datastream_dataset_temp",
"ignore_missing": true
}
},
{
"set": {
"if": "ctx.module_temp != null",
"override": true,
"field": "event.module",
"value": "{{module_temp.0}}"
}
},
{
"set": {
"if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'",
"field": "event.module",
"value": "{{ datastream_dataset_temp.0 }}",
"ignore_failure": true,
"ignore_empty_value": true,
"description": "Fix EA network packet capture"
}
},
{
"gsub": {
"if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')",
"field": "event.dataset",
"pattern": "^[^.]*.",
"replacement": "",
"target_field": "dataset_tag_temp"
}
},
{
"append": {
"if": "ctx.dataset_tag_temp != null",
"field": "tags",
"value": "{{dataset_tag_temp}}",
"allow_duplicates": false
}
},
{
"set": {
"if": "ctx.network?.direction == 'egress'",
"override": true,
"field": "network.initiated",
"value": "true"
}
},
{
"set": {
"if": "ctx.network?.direction == 'ingress'",
"override": true,
"field": "network.initiated",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv4'",
"override": true,
"field": "destination.ipv6",
"value": "false"
}
},
{
"set": {
"if": "ctx.network?.type == 'ipv6'",
"override": true,
"field": "destination.ipv6",
"value": "true"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.dataset",
"value": "import"
}
},
{
"set": {
"if": "ctx.tags != null && ctx.tags.contains('import')",
"override": true,
"field": "data_stream.namespace",
"value": "so"
}
},
{
"community_id": {
"if": "ctx.event?.dataset == 'endpoint.events.network'",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.module == 'fim'",
"override": true,
"field": "event.module",
"value": "file_integrity"
}
},
{
"rename": {
"if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'",
"ignore_missing": true,
"field": "winlog.event_data.Threat Name",
"target_field": "winlog.event_data.threat_name"
}
},
{
"set": {
"if": "ctx?.metadata?.kafka != null",
"field": "kafka.id",
"value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'",
"field": "event.module",
"value": "elasticsearch"
}
},
{
"append": {
"field": "related.ip",
"value": [
"{{source.ip}}",
"{{destination.ip}}"
],
"allow_duplicates": false,
"if": "ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null",
"ignore_failure": true
}
},
{
"foreach": {
"field": "host.ip",
"processor": {
"append": {
"field": "related.ip",
"value": "{{_ingest._value}}",
"allow_duplicates": false
}
},
"if": "ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null",
"ignore_missing": true,
"description": "Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"
}
},
{
"pipeline": {
"name": ".fleet_final_pipeline-1",
"ignore_missing_pipeline": true
}
},
{
"remove": {
"field": "event.agent_id_status",
"ignore_missing": true,
"if": "ctx?.event?.agent_id_status == 'auth_metadata_missing'"
}
},
{
"remove": {
"field": [
"message2",
"type",
"fields",
"category",
"module",
"dataset",
"event.dataset_temp",
"dataset_tag_temp",
"module_temp",
"datastream_dataset_temp"
],
"ignore_missing": true,
"ignore_failure": true
}
}
] ]
} }
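This hunk reflows the Fleet final-pipeline processors between expanded and compact JSON and adjusts a handful of processors along the way. When editing ingest processors like these, the _simulate endpoint is a quick way to sanity-check behavior against a sample document before deploying; a generic, hedged example:
```
# Simulate a pipeline body against a made-up sample document without installing it.
curl -sK /opt/so/conf/kibana/curl.config -k -L -H "Content-Type: application/json" \
    "https://localhost:9200/_ingest/pipeline/_simulate" -d '
{
  "pipeline": {
    "processors": [
      { "set": { "field": "event.module", "value": "elastic_agent", "ignore_failure": true } }
    ]
  },
  "docs": [ { "_source": { "event": { "dataset": "system.syslog" } } } ]
}' | jq .
```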
+6 -95
View File
@@ -1,98 +1,9 @@
{ {
"description": "kratos", "description" : "kratos",
"processors": [ "processors" : [
{ {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
"set": { {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
"field": "audience", {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
"value": "access", { "pipeline": { "name": "common" } }
"override": false,
"ignore_failure": true
}
},
{
"set": {
"field": "event.dataset",
"ignore_empty_value": true,
"ignore_failure": true,
"value": "kratos.{{{audience}}}",
"media_type": "text/plain"
}
},
{
"set": {
"field": "event.action",
"ignore_failure": true,
"copy_from": "msg"
}
},
{
"rename": {
"field": "http_request",
"target_field": "http.request",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http_response",
"target_field": "http.response",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.path",
"target_field": "http.uri",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.query",
"target_field": "http.query",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.headers.user-agent",
"target_field": "http.useragent",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "file",
"target_field": "file.path",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "common"
}
}
] ]
} }
+12 -76
View File
@@ -1,79 +1,15 @@
{ {
"description": "suricata.alert", "description" : "suricata.alert",
"processors": [ "processors" : [
{ { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
"set": { { "set": { "field": "tags","value": "alert" }},
"if": "ctx.event?.imported != true", { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
"field": "_index", { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
"value": "logs-suricata.alerts-so" { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
} { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
}, { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
{ { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
"set": { { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
"field": "tags", { "pipeline": { "name": "common.nids" } }
"value": "alert"
}
},
{
"rename": {
"field": "message2.alert",
"target_field": "rule",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature",
"target_field": "rule.name",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.ref",
"target_field": "rule.version",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature_id",
"target_field": "rule.uuid",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "rule.signature_id",
"target_field": "rule.signature",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.payload_printable",
"target_field": "network.data.decoded",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"dissect": {
"field": "rule.rule",
"pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"pipeline": {
"name": "common.nids"
}
}
] ]
} }
+21 -146
View File
@@ -1,155 +1,30 @@
{ {
"description": "suricata.common", "description" : "suricata.common",
"processors": [ "processors" : [
{ { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
"json": { { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
"field": "message", { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
"target_field": "message2", { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
"ignore_failure": true { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
} { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
}, { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
{ { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
"rename": { { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
"field": "message2.pkt_src", { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
"target_field": "network.packet_source", { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
"ignore_failure": true { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
} { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
}, { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
{ { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
"rename": { { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
"field": "message2.proto", { "remove":{ "field": "agent", "ignore_failure": true } },
"target_field": "network.transport", {"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.in_iface",
"target_field": "observer.ingress.interface.name",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.flow_id",
"target_field": "log.id.uid",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.src_ip",
"target_field": "source.ip",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.src_port",
"target_field": "source.port",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dest_ip",
"target_field": "destination.ip",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dest_port",
"target_field": "destination.port",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.vlan",
"target_field": "network.vlan.id",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.community_id",
"target_field": "network.community_id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.xff",
"target_field": "xff.ip",
"ignore_missing": true
}
},
{
"set": {
"field": "event.dataset",
"value": "{{ message2.event_type }}"
}
},
{
"set": {
"field": "observer.name",
"value": "{{agent.name}}"
}
},
{
"set": {
"field": "event.ingested",
"value": "{{@timestamp}}"
}
},
{
"date": {
"field": "message2.timestamp",
"target_field": "@timestamp",
"formats": [
"ISO8601",
"UNIX"
],
"timezone": "UTC",
"ignore_failure": true
}
},
{
"remove": {
"field": "agent",
"ignore_failure": true
}
},
{
"append": {
"field": "related.ip",
"value": [
"{{source.ip}}",
"{{destination.ip}}"
],
"allow_duplicates": false,
"ignore_failure": true
}
},
{ {
"script": { "script": {
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }", "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
"ignore_failure": false "ignore_failure": false
} }
}, },
{ { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
"rename": {
"field": "message2.capture_file",
"target_field": "suricata.capture_file",
"ignore_missing": true
}
},
{
"pipeline": {
"if": "ctx?.event?.dataset != null",
"name": "suricata.{{event.dataset}}"
}
}
] ]
} }
+18 -133
View File
@@ -1,136 +1,21 @@
{ {
"description": "suricata.dns", "description" : "suricata.dns",
"processors": [ "processors" : [
{ { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
"rename": { { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
"field": "message2.proto", { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
"target_field": "network.transport", { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
"ignore_missing": true { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
} { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
}, { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
{ { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
"rename": { { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
"field": "message2.app_proto", { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
"target_field": "network.protocol", { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
"ignore_missing": true { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
} { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
}, { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
{ { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
"rename": { { "pipeline": { "name": "common" } }
"field": "message2.dns.type",
"target_field": "dns.query.type",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.tx_id",
"target_field": "dns.tx_id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.id",
"target_field": "dns.id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.version",
"target_field": "dns.version",
"ignore_missing": true
}
},
{
"pipeline": {
"name": "suricata.dnsv3",
"ignore_missing_pipeline": true,
"if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.dns.rrname",
"target_field": "dns.query.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rrtype",
"target_field": "dns.query.type_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.flags",
"target_field": "dns.flags",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.qr",
"target_field": "dns.qr",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rd",
"target_field": "dns.recursion.desired",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.ra",
"target_field": "dns.recursion.available",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.opcode",
"target_field": "dns.opcode",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.rcode",
"target_field": "dns.response.code_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.grouped.A",
"target_field": "dns.answers.data",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.dns.grouped.CNAME",
"target_field": "dns.answers.name",
"ignore_missing": true
}
},
{
"pipeline": {
"if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
"name": "dns.tld"
}
},
{
"pipeline": {
"name": "common"
}
}
] ]
} }
@@ -1,56 +0,0 @@
{
"processors": [
{
"rename": {
"field": "message2.dns.queries",
"target_field": "dns.queries",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
}
},
{
"script": {
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
}
},
{
"foreach": {
"field": "dns.queries",
"processor": {
"rename": {
"field": "_ingest._value.rrname",
"target_field": "_ingest._value.name",
"ignore_missing": true
}
},
"ignore_failure": true
}
},
{
"foreach": {
"field": "dns.queries",
"processor": {
"rename": {
"field": "_ingest._value.rrtype",
"target_field": "_ingest._value.type_name",
"ignore_missing": true
}
},
"ignore_failure": true
}
},
{
"pipeline": {
"name": "suricata.tld",
"ignore_missing_pipeline": true,
"if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
"ignore_failure": true
}
}
]
}
@@ -1,52 +0,0 @@
{
"processors": [
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}",
"ignore_failure": true
}
}
]
}
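
The deleted pipeline above derives top_level_domain, parent_domain, subdomain, highest_registered_domain and the related length fields for each entry in dns.queries using plain lastIndexOf('.') splits, so it is not public-suffix aware. A rough re-creation of those splits for one made-up query name is sketched below; it is shell for illustration only, the sample name is invented, and none of this is part of the pipeline itself.

```
# Illustrative re-creation of the naive lastIndexOf('.') splits above, for one sample name.
name="mail.example.co.uk"
tld="${name##*.}"            # top_level_domain          -> "uk"
without_tld="${name%.*}"     # query_without_tld         -> "mail.example.co" (removed at the end)
parent="${without_tld##*.}"  # parent_domain             -> "co"
sub="${without_tld%.*}"      # subdomain                 -> "mail.example"
echo "highest_registered_domain=${parent}.${tld}"            # -> "co.uk"
echo "subdomain_length=${#sub} parent_domain_length=${#parent}"
```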
@@ -1,61 +0,0 @@
{
"description": "zeek.analyzer",
"processors": [
{
"set": {
"field": "event.dataset",
"value": "analyzer"
}
},
{
"remove": {
"field": [
"host"
],
"ignore_failure": true
}
},
{
"json": {
"field": "message",
"target_field": "message2",
"ignore_failure": true
}
},
{
"set": {
"field": "network.protocol",
"copy_from": "message2.analyzer_name",
"ignore_empty_value": true,
"if": "ctx?.message2?.analyzer_kind == 'protocol'"
}
},
{
"set": {
"field": "network.protocol",
"ignore_empty_value": true,
"if": "ctx?.message2?.analyzer_kind != 'protocol'",
"copy_from": "message2.proto"
}
},
{
"lowercase": {
"field": "network.protocol",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.failure_reason",
"target_field": "error.reason",
"ignore_missing": true
}
},
{
"pipeline": {
"name": "zeek.common"
}
}
]
}
+32 -224
@@ -1,227 +1,35 @@
{
"description": "zeek.dns",
"processors": [
{
"set": {
"field": "event.dataset",
"value": "dns"
}
},
{
"remove": {
"field": [
"host"
],
"ignore_failure": true
}
},
{
"json": {
"field": "message",
"target_field": "message2",
"ignore_failure": true
}
},
{
"dot_expander": {
"field": "id.orig_h",
"path": "message2",
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.proto",
"target_field": "network.transport",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.trans_id",
"target_field": "dns.id",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rtt",
"target_field": "event.duration",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.query",
"target_field": "dns.query.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qclass",
"target_field": "dns.query.class",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qclass_name",
"target_field": "dns.query.class_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qtype",
"target_field": "dns.query.type",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.qtype_name",
"target_field": "dns.query.type_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rcode",
"target_field": "dns.response.code",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rcode_name",
"target_field": "dns.response.code_name",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.AA",
"target_field": "dns.authoritative",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.TC",
"target_field": "dns.truncated",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.RD",
"target_field": "dns.recursion.desired",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.RA",
"target_field": "dns.recursion.available",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.Z",
"target_field": "dns.reserved",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.answers",
"target_field": "dns.answers.name",
"ignore_missing": true
}
},
{
"foreach": {
"field": "dns.answers.name",
"processor": {
"pipeline": {
"name": "common.ip_validation"
}
},
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
"ignore_failure": true
}
},
{
"foreach": {
"field": "temp._valid_ips",
"processor": {
"append": {
"field": "dns.resolved_ip",
"allow_duplicates": false,
"value": "{{{_ingest._value}}}",
"ignore_failure": true
}
},
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
"ignore_failure": true
}
},
{
"script": {
"source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
"ignore_failure": true
}
},
{
"remove": {
"field": [
"temp"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"rename": {
"field": "message2.TTLs",
"target_field": "dns.ttls",
"ignore_missing": true
}
},
{
"rename": {
"field": "message2.rejected",
"target_field": "dns.query.rejected",
"ignore_missing": true
}
},
{
"script": {
"lang": "painless",
"source": "ctx.dns.query.length = ctx.dns.query.name.length()",
"ignore_failure": true
}
},
{
"set": {
"if": "ctx._index == 'so-zeek'",
"field": "_index",
"value": "so-zeek_dns",
"override": true
}
},
{
"pipeline": {
"if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
"name": "dns.tld"
}
},
{
"pipeline": {
"name": "zeek.common"
}
}
]
}

{
"description" : "zeek.dns",
"processors" : [
{ "set": { "field": "event.dataset", "value": "dns" } },
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
{ "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
{ "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
{ "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
{ "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
{ "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
{ "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
{ "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
{ "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
{ "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
{ "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
{ "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
{ "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
{ "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
{ "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
{ "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
{ "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
{ "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
{ "pipeline": { "name": "zeek.common" } }
]
}
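
Both blocks above describe the same zeek.dns ingest pipeline: the removed multi-line form first, the added single-line-per-processor form second. To confirm which definition a cluster is actually running, the loaded pipeline can be fetched directly; this is a sketch only, and the host, port, and credentials are placeholders, since Security Onion normally fronts Elasticsearch with authentication.

```
# Sketch only: inspect the zeek.dns pipeline currently loaded in Elasticsearch.
# localhost:9200 and elastic:changeme are placeholders; substitute the values
# used by your deployment.
curl -sk -u elastic:changeme \
  'https://localhost:9200/_ingest/pipeline/zeek.dns?pretty'
```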
+20
@@ -0,0 +1,20 @@
{
"description" : "zeek.dpd",
"processors" : [
{ "set": { "field": "event.dataset", "value": "dpd" } },
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
{ "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
{ "pipeline": { "name": "zeek.common" } }
]
}
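
The new zeek.dpd pipeline json-decodes message, expands the dotted id.* keys, renames them to ECS fields, and hands off to zeek.common. A hedged smoke test with the ingest _simulate API is sketched below; the sample record and the connection details are invented, and the call assumes the referenced zeek.common pipeline is also loaded on the target cluster.

```
# Sketch: run a fabricated dpd record through the stored zeek.dpd pipeline.
# Connection details and the sample event are assumptions, not taken from this repo.
curl -sk -u elastic:changeme -H 'Content-Type: application/json' -X POST \
  'https://localhost:9200/_ingest/pipeline/zeek.dpd/_simulate?pretty' -d '
{
  "docs": [
    { "_source": { "message": "{\"id.orig_h\":\"10.0.0.5\",\"id.orig_p\":51515,\"id.resp_h\":\"192.0.2.10\",\"id.resp_p\":443,\"proto\":\"tcp\",\"analyzer\":\"SSL\",\"failure_reason\":\"Invalid TLS version\"}" } }
  ]
}'
```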
+1 -21
@@ -20,28 +20,8 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.log.gz
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D
appender.rolling_json.type = RollingFile
appender.rolling_json.name = rolling_json
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
appender.rolling_json.layout.type = ECSJsonLayout
appender.rolling_json.layout.dataset = elasticsearch.server
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
appender.rolling_json.policies.type = Policies
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_json.policies.time.interval = 1
appender.rolling_json.policies.time.modulate = true
appender.rolling_json.strategy.type = DefaultRolloverStrategy
appender.rolling_json.strategy.action.type = Delete
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
appender.rolling_json.strategy.action.condition.type = IfFileName
appender.rolling_json.strategy.action.condition.glob = *.json.gz
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json
+7 -194
@@ -27,13 +27,6 @@ elasticsearch:
readonly: True
global: True
helpLink: elasticsearch.html
logsdb:
enabled:
description: Enables or disables the Elasticsearch logsdb index mode. When enabled, most logs-* datastreams will convert to logsdb from standard after rolling over.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
routing:
allocation:
disk:
@@ -84,6 +77,13 @@ elasticsearch:
custom008: *pipelines
custom009: *pipelines
custom010: *pipelines
managed_integrations:
description: List of integrations to add into SOC config UI. Enter the full or partial integration name. Eg. 1password, 1pass
forcedType: "[]string"
multiline: True
global: True
advanced: True
helpLink: elasticsearch.html
index_settings:
global_overrides:
index_template:
@@ -131,47 +131,6 @@ elasticsearch:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
forcedType: string
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
cold:
min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -185,12 +144,6 @@ elasticsearch:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally dont need to be as fast as those in the hot tier. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -205,52 +158,6 @@ elasticsearch:
forcedType: int
global: True
helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -380,47 +287,6 @@ elasticsearch:
global: True
advanced: True
helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
forcedType: string
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally dont need to be as fast as those in the hot tier. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -448,52 +314,6 @@ elasticsearch:
global: True
advanced: True
helpLink: elasticsearch.html
shrink:
method:
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
options:
- COUNT
- SIZE
global: True
advanced: True
number_of_shards:
title: shard count
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
global: True
forcedType: int
advanced: True
max_primary_shard_size:
title: max shard size
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
regex: ^[0-9]+(?:gb|tb|pb)$
global: True
forcedType: string
advanced: True
allow_write_after_shrink:
description: Allow writes after shrink.
global: True
forcedType: bool
default: False
advanced: True
forcemerge:
max_num_segments:
description: Reduce the number of segments in each index shard and clean up deleted documents.
global: True
forcedType: int
advanced: True
index_codec:
title: compression
description: Use higher compression for stored fields at the cost of slower performance.
forcedType: bool
global: True
default: False
advanced: True
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
cold:
min_age:
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -510,12 +330,6 @@ elasticsearch:
global: True
advanced: True
helpLink: elasticsearch.html
allocate:
number_of_replicas:
description: Set the number of replicas. Remains the same as the previous phase by default.
forcedType: int
global: True
advanced: True
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. Its important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -578,7 +392,6 @@ elasticsearch:
so-logs-elastic_agent_x_metricbeat: *indexSettings
so-logs-elastic_agent_x_osquerybeat: *indexSettings
so-logs-elastic_agent_x_packetbeat: *indexSettings
so-logs-elasticsearch_x_server: *indexSettings
so-metrics-endpoint_x_metadata: *indexSettings
so-metrics-endpoint_x_metrics: *indexSettings
so-metrics-endpoint_x_policy: *indexSettings
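
Alongside the removals, this file also gains the elasticsearch managed_integrations list, whose annotation says it takes full or partial integration names to surface in the SOC config UI. A hedged sketch of a local override follows; the pillar directory, the example file name, and the placement of the key directly under elasticsearch are assumptions about a typical Security Onion manager, not something this diff spells out.

```
# Sketch only: append an elasticsearch managed_integrations override to local pillar.
# The directory and the example minion file name are assumptions; point this at
# whichever minion pillar file your grid actually uses.
PILLAR_FILE=/opt/so/saltstack/local/pillar/minions/example_manager.sls
cat <<'EOF' >> "$PILLAR_FILE"
elasticsearch:
  managed_integrations:
    - 1password
EOF
```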
-66
@@ -1,66 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
# Create a cert for elasticsearch
elasticsearch_key:
x509.private_key_managed:
- name: /etc/pki/elasticsearch.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticsearch_crt:
x509.certificate_managed:
- name: /etc/pki/elasticsearch.crt
- ca_server: {{ CA.server }}
- signing_policy: registry
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
- x509: /etc/pki/elasticsearch.key
elastickeyperms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
elasticp12perms:
file.managed:
- replace: False
- name: /etc/pki/elasticsearch.p12
- mode: 640
- group: 930
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
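
The removed state above generated /etc/pki/elasticsearch.key, a CA-signed elasticsearch.crt, and a password-less PKCS#12 bundle via the openssl export in its cmd.run. A quick, hedged way to see what such a state left behind on an existing node is to inspect those artifacts directly; the paths come from the state itself, and running the commands assumes the files are still present.

```
# Sketch: inspect the certificate and PKCS#12 bundle the deleted state managed.
# Paths are taken from the state above; the empty import password matches its
# "-passout pass:" export option.
openssl x509 -in /etc/pki/elasticsearch.crt -noout -subject -issuer -enddate
openssl pkcs12 -in /etc/pki/elasticsearch.p12 -passin pass: -noout && echo "PKCS#12 bundle parses cleanly"
```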

Some files were not shown because too many files have changed in this diff.