Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)
Merge remote-tracking branch 'origin/2.4/dev' into idstools-refactor
@@ -172,7 +172,15 @@ MANAGER_HOSTNAME = socket.gethostname()

def _download_image():
    """
    Download and validate the Oracle Linux KVM image.
    Download and validate the Oracle Linux KVM image with retry logic and progress monitoring.

    Features:
    - Detects stalled downloads (no progress for 30 seconds)
    - Retries up to 3 times on failure
    - Connection timeout of 30 seconds
    - Read timeout of 60 seconds
    - Cleans up partial downloads on failure

    Returns:
        bool: True if successful or file exists with valid checksum, False on error
    """

@@ -186,25 +194,54 @@ def _download_image():
    log.info("Starting image download process")

    # Retry configuration
    max_attempts = 3
    retry_delay = 5  # seconds to wait between retry attempts
    stall_timeout = 30  # seconds without progress before considering download stalled
    connection_timeout = 30  # seconds to establish connection
    read_timeout = 60  # seconds to wait for data chunks

    for attempt in range(1, max_attempts + 1):
        log.info("Download attempt %d of %d", attempt, max_attempts)

        try:
            # Download file
            # Download file with timeouts
            log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
            response = requests.get(IMAGE_URL, stream=True)
            response = requests.get(
                IMAGE_URL,
                stream=True,
                timeout=(connection_timeout, read_timeout)
            )
            response.raise_for_status()

            # Get total file size for progress tracking
            total_size = int(response.headers.get('content-length', 0))
            downloaded_size = 0
            last_log_time = 0
            last_progress_time = time.time()
            last_downloaded_size = 0

            # Save file with progress logging
            # Save file with progress logging and stall detection
            with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        downloaded_size += len(chunk)
                        current_time = time.time()

                        # Check for stalled download
                        if downloaded_size > last_downloaded_size:
                            # Progress made, reset stall timer
                            last_progress_time = current_time
                            last_downloaded_size = downloaded_size
                        elif current_time - last_progress_time > stall_timeout:
                            # No progress for stall_timeout seconds
                            raise Exception(
                                f"Download stalled: no progress for {stall_timeout} seconds "
                                f"at {downloaded_size}/{total_size} bytes"
                            )

                        # Log progress every second
                        current_time = time.time()
                        if current_time - last_log_time >= 1:
                            progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
                            log.info("Progress - %.1f%% (%d/%d bytes)",

@@ -212,17 +249,50 @@ def _download_image():
                            last_log_time = current_time

            # Validate downloaded file
            log.info("Download complete, validating checksum...")
            if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
                log.error("Checksum validation failed on attempt %d", attempt)
                os.unlink(IMAGE_PATH)
                if attempt < max_attempts:
                    log.info("Will retry download...")
                    continue
                else:
                    log.error("All download attempts failed due to checksum mismatch")
                    return False

            log.info("Successfully downloaded and validated Oracle Linux KVM image")
            return True

        except Exception as e:
            log.error("Error downloading hypervisor image: %s", str(e))
        except requests.exceptions.Timeout as e:
            log.error("Download attempt %d failed: Timeout - %s", attempt, str(e))
            if os.path.exists(IMAGE_PATH):
                os.unlink(IMAGE_PATH)
            if attempt < max_attempts:
                log.info("Will retry download in %d seconds...", retry_delay)
                time.sleep(retry_delay)
            else:
                log.error("All download attempts failed due to timeout")

        except requests.exceptions.RequestException as e:
            log.error("Download attempt %d failed: Network error - %s", attempt, str(e))
            if os.path.exists(IMAGE_PATH):
                os.unlink(IMAGE_PATH)
            if attempt < max_attempts:
                log.info("Will retry download in %d seconds...", retry_delay)
                time.sleep(retry_delay)
            else:
                log.error("All download attempts failed due to network errors")

        except Exception as e:
            log.error("Download attempt %d failed: %s", attempt, str(e))
            if os.path.exists(IMAGE_PATH):
                os.unlink(IMAGE_PATH)
            if attempt < max_attempts:
                log.info("Will retry download in %d seconds...", retry_delay)
                time.sleep(retry_delay)
            else:
                log.error("All download attempts failed")

    return False
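
Reviewer note: the stall detection added above resets a timer whenever new bytes arrive and aborts when the timer exceeds the threshold. A minimal standalone sketch of the same pattern, for reference only (the function name, `url`, and `dest` are illustrative, not from this runner):

    import time
    import requests

    def download_with_stall_detection(url, dest, stall_timeout=30):
        # Stream a download, aborting if no bytes arrive for stall_timeout seconds.
        response = requests.get(url, stream=True, timeout=(30, 60))
        response.raise_for_status()
        last_progress = time.time()
        written = 0
        with open(dest, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    written += len(chunk)
                    last_progress = time.time()  # progress resets the stall timer
                elif time.time() - last_progress > stall_timeout:
                    raise RuntimeError(f"Download stalled after {written} bytes")
        return written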

def _check_ssh_keys_exist():
@@ -419,25 +489,28 @@ def _ensure_hypervisor_host_dir(minion_id: str = None):
        log.error(f"Error creating hypervisor host directory: {str(e)}")
        return False

def _apply_dyanno_hypervisor_state():
def _apply_dyanno_hypervisor_state(status):
    """
    Apply the soc.dyanno.hypervisor state on the salt master.

    This function applies the soc.dyanno.hypervisor state on the salt master
    to update the hypervisor annotation and ensure all hypervisor host directories exist.

    Args:
        status: Status passed to the hypervisor annotation state

    Returns:
        bool: True if state was applied successfully, False otherwise
    """
    try:
        log.info("Applying soc.dyanno.hypervisor state on salt master")
        log.info(f"Applying soc.dyanno.hypervisor state on salt master with status: {status}")

        # Initialize the LocalClient
        local = salt.client.LocalClient()

        # Target the salt master to apply the soc.dyanno.hypervisor state
        target = MANAGER_HOSTNAME + '_*'
        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', "pillar={'baseDomain': {'status': 'PreInit'}}", 'concurrent=True'], tgt_type='glob')
        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', f"pillar={{'baseDomain': {{'status': '{status}'}}}}", 'concurrent=True'], tgt_type='glob')
        log.debug(f"state_result: {state_result}")
        # Check if state was applied successfully
        if state_result:

@@ -454,17 +527,17 @@ def _apply_dyanno_hypervisor_state():
                success = False

            if success:
                log.info("Successfully applied soc.dyanno.hypervisor state")
                log.info(f"Successfully applied soc.dyanno.hypervisor state with status: {status}")
                return True
            else:
                log.error("Failed to apply soc.dyanno.hypervisor state")
                log.error(f"Failed to apply soc.dyanno.hypervisor state with status: {status}")
                return False
        else:
            log.error("No response from salt master when applying soc.dyanno.hypervisor state")
            log.error(f"No response from salt master when applying soc.dyanno.hypervisor state with status: {status}")
            return False

    except Exception as e:
        log.error(f"Error applying soc.dyanno.hypervisor state: {str(e)}")
        log.error(f"Error applying soc.dyanno.hypervisor state with status: {status}: {str(e)}")
        return False
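
Reviewer note: the pillar override here is passed to `state.apply` as a string-encoded positional argument. A minimal sketch of an equivalent call using a structured `kwarg` dict instead, shown only for illustration (the function and parameter names below are hypothetical):

    import salt.client

    def apply_hypervisor_annotation(manager_hostname, status):
        # Sketch: apply soc.dyanno.hypervisor with the status threaded into pillar.
        local = salt.client.LocalClient()
        return local.cmd(
            manager_hostname + '_*',
            'state.apply',
            ['soc.dyanno.hypervisor'],
            kwarg={'pillar': {'baseDomain': {'status': status}}, 'concurrent': True},
            tgt_type='glob',
        )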

def _apply_cloud_config_state():
@@ -598,11 +671,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
        log.warning("Failed to apply salt.cloud.config state, continuing with setup")
        # We don't return an error here as we want to continue with the setup process

    # Apply the soc.dyanno.hypervisor state on the salt master
    if not _apply_dyanno_hypervisor_state():
        log.warning("Failed to apply soc.dyanno.hypervisor state, continuing with setup")
        # We don't return an error here as we want to continue with the setup process

    log.info("Starting setup_environment in setup_hypervisor runner")

    # Check if environment is already set up

@@ -616,9 +684,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id

    # Handle image setup if needed
    if not image_valid:
        _apply_dyanno_hypervisor_state('ImageDownloadStart')
        log.info("Starting image download/validation process")
        if not _download_image():
            log.error("Image download failed")
            # Update hypervisor annotation with failure status
            _apply_dyanno_hypervisor_state('ImageDownloadFailed')
            return {
                'success': False,
                'error': 'Image download failed',

@@ -631,6 +702,8 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
    log.info("Setting up SSH keys")
    if not _setup_ssh_keys():
        log.error("SSH key setup failed")
        # Update hypervisor annotation with failure status
        _apply_dyanno_hypervisor_state('SSHKeySetupFailed')
        return {
            'success': False,
            'error': 'SSH key setup failed',

@@ -655,6 +728,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
    success = vm_result.get('success', False)
    log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED")

    # Update hypervisor annotation with success status
    if success:
        _apply_dyanno_hypervisor_state('PreInit')
    else:
        _apply_dyanno_hypervisor_state('SetupFailed')

    # If setup was successful and we have a minion_id, run highstate
    if success and minion_id:
        log.info("Running highstate on hypervisor %s", minion_id)
@@ -15,7 +15,6 @@ elasticfleet:
  logging:
    zeek:
      excluded:
        - analyzer
        - broker
        - capture_loss
        - cluster
@@ -1,15 +1,79 @@
{
  "description" : "suricata.alert",
  "processors" : [
    { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
    { "set": { "field": "tags","value": "alert" }},
    { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
    { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
    { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
    { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
    { "pipeline": { "name": "common.nids" } }
  "description": "suricata.alert",
  "processors": [
    {
      "set": {
        "if": "ctx.event?.imported != true",
        "field": "_index",
        "value": "logs-suricata.alerts-so"
      }
    },
    {
      "set": {
        "field": "tags",
        "value": "alert"
      }
    },
    {
      "rename": {
        "field": "message2.alert",
        "target_field": "rule",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature",
        "target_field": "rule.name",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.ref",
        "target_field": "rule.version",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.uuid",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.signature",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.payload_printable",
        "target_field": "network.data.decoded",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "dissect": {
        "field": "rule.rule",
        "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "pipeline": {
        "name": "common.nids"
      }
    }
  ]
}
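
Reviewer note: since this change is a reformat plus added `ignore_missing` flags, a quick way to verify the pipeline still behaves the same is Elasticsearch's simulate API. A minimal sketch, assuming a local cluster and that the pipeline is loaded under the name `suricata.alert` (endpoint and credentials are illustrative):

    import json
    import requests

    ES = "https://localhost:9200"  # hypothetical endpoint; adjust host/auth for your grid

    doc = {"_source": {"message": json.dumps(
        {"event_type": "alert", "alert": {"signature": "TEST", "signature_id": 1}})}}
    resp = requests.post(
        f"{ES}/_ingest/pipeline/suricata.alert/_simulate",
        json={"docs": [doc]},
        verify=False,  # lab sketch only; use proper CA verification in practice
    )
    print(json.dumps(resp.json(), indent=2))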

@@ -1,30 +1,155 @@
{
  "description" : "suricata.common",
  "processors" : [
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
    { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
    { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
    { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
    { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
    { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
    { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
    { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
    { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
    { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
    { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
    { "remove":{ "field": "agent", "ignore_failure": true } },
    {"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
  "description": "suricata.common",
  "processors": [
    {
      "json": {
        "field": "message",
        "target_field": "message2",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.pkt_src",
        "target_field": "network.packet_source",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.proto",
        "target_field": "network.transport",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.in_iface",
        "target_field": "observer.ingress.interface.name",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.flow_id",
        "target_field": "log.id.uid",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.src_ip",
        "target_field": "source.ip",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.src_port",
        "target_field": "source.port",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dest_ip",
        "target_field": "destination.ip",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dest_port",
        "target_field": "destination.port",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.vlan",
        "target_field": "network.vlan.id",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.community_id",
        "target_field": "network.community_id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.xff",
        "target_field": "xff.ip",
        "ignore_missing": true
      }
    },
    {
      "set": {
        "field": "event.dataset",
        "value": "{{ message2.event_type }}"
      }
    },
    {
      "set": {
        "field": "observer.name",
        "value": "{{agent.name}}"
      }
    },
    {
      "set": {
        "field": "event.ingested",
        "value": "{{@timestamp}}"
      }
    },
    {
      "date": {
        "field": "message2.timestamp",
        "target_field": "@timestamp",
        "formats": [
          "ISO8601",
          "UNIX"
        ],
        "timezone": "UTC",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "field": "agent",
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "related.ip",
        "value": [
          "{{source.ip}}",
          "{{destination.ip}}"
        ],
        "allow_duplicates": false,
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
        "ignore_failure": false
      }
    },
    { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
    {
      "rename": {
        "field": "message2.capture_file",
        "target_field": "suricata.capture_file",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "if": "ctx?.event?.dataset != null",
        "name": "suricata.{{event.dataset}}"
      }
    }
  ]
}
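
Reviewer note: the painless script above buckets source/destination IPs into `network.private_ip` and `network.public_ip` using an RFC 1918 check. A hedged Python re-statement of the same logic, useful for testing expected outputs (note `ipaddress.is_private` is broader than the script's pure RFC 1918 test, counting loopback and link-local as private too):

    import ipaddress

    def classify_endpoint_ips(event):
        # Sketch of the pipeline script: sort source/destination IPs into
        # network.private_ip / network.public_ip lists, deduplicated.
        network = event.setdefault('network', {})
        for side in ('source', 'destination'):
            ip = event.get(side, {}).get('ip')
            if ip is None:
                continue
            key = 'private_ip' if ipaddress.ip_address(ip).is_private else 'public_ip'
            bucket = network.setdefault(key, [])
            if ip not in bucket:
                bucket.append(ip)
        return event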

@@ -1,21 +1,136 @@
{
  "description" : "suricata.dns",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "common" } }
  "description": "suricata.dns",
  "processors": [
    {
      "rename": {
        "field": "message2.proto",
        "target_field": "network.transport",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.app_proto",
        "target_field": "network.protocol",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.type",
        "target_field": "dns.query.type",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.tx_id",
        "target_field": "dns.tx_id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.id",
        "target_field": "dns.id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.version",
        "target_field": "dns.version",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "suricata.dnsv3",
        "ignore_missing_pipeline": true,
        "if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rrname",
        "target_field": "dns.query.name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rrtype",
        "target_field": "dns.query.type_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.flags",
        "target_field": "dns.flags",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.qr",
        "target_field": "dns.qr",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rd",
        "target_field": "dns.recursion.desired",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.ra",
        "target_field": "dns.recursion.available",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.opcode",
        "target_field": "dns.opcode",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rcode",
        "target_field": "dns.response.code_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.grouped.A",
        "target_field": "dns.answers.data",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.grouped.CNAME",
        "target_field": "dns.answers.name",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
        "name": "dns.tld"
      }
    },
    {
      "pipeline": {
        "name": "common"
      }
    }
  ]
}
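
Reviewer note: the conditional hand-off to `suricata.dnsv3` above handles Suricata's EVE DNS format version 3, where per-query fields move into a `dns.queries` array. A hedged illustration of the two record shapes as Python dicts (field values invented for the example; the v3 shape is the one the new subpipeline's scripts index into):

    # Older EVE DNS-style record: query fields directly under dns
    dns_v2 = {"dns": {"version": 2, "rrname": "example.com", "rrtype": "A"}}

    # EVE DNS v3-style record: queries is a list, as suricata.dnsv3 expects
    dns_v3 = {"dns": {"version": 3,
                      "queries": [{"rrname": "example.com", "rrtype": "A"}]}}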

salt/elasticsearch/files/ingest/suricata.dnsv3 (new file, 56 lines)
@@ -0,0 +1,56 @@
{
  "processors": [
    {
      "rename": {
        "field": "message2.dns.queries",
        "target_field": "dns.queries",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n  if (ctx.dns == null) {\n    ctx.dns = new HashMap();\n  }\n  if (ctx.dns.query == null) {\n    ctx.dns.query = new HashMap();\n  }\n  ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
      }
    },
    {
      "script": {
        "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n  if (ctx.dns == null) {\n    ctx.dns = new HashMap();\n  }\n  if (ctx.dns.query == null) {\n    ctx.dns.query = new HashMap();\n  }\n  ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
      }
    },
    {
      "foreach": {
        "field": "dns.queries",
        "processor": {
          "rename": {
            "field": "_ingest._value.rrname",
            "target_field": "_ingest._value.name",
            "ignore_missing": true
          }
        },
        "ignore_failure": true
      }
    },
    {
      "foreach": {
        "field": "dns.queries",
        "processor": {
          "rename": {
            "field": "_ingest._value.rrtype",
            "target_field": "_ingest._value.type_name",
            "ignore_missing": true
          }
        },
        "ignore_failure": true
      }
    },
    {
      "pipeline": {
        "name": "suricata.tld",
        "ignore_missing_pipeline": true,
        "if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
        "ignore_failure": true
      }
    }
  ]
}

salt/elasticsearch/files/ingest/suricata.tld (new file, 52 lines)
@@ -0,0 +1,52 @@
{
  "processors": [
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.name != null && q.name.contains('.')) {\n      q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.name != null && q.name.contains('.')) {\n      q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n      q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n      q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.parent_domain != null && q.top_level_domain != null) {\n      q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.subdomain != null) {\n      q.subdomain_length = q.subdomain.length();\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    if (q.parent_domain != null) {\n      q.parent_domain_length = q.parent_domain.length();\n    }\n  }\n}",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n  for (def q : ctx.dns.queries) {\n    q.remove('query_without_tld');\n  }\n}",
        "ignore_failure": true
      }
    }
  ]
}
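
Reviewer note: the eight scripts above split each query name by dot position into TLD, parent domain, subdomain, and highest registered domain (a naive label split, not a public-suffix lookup). A compact Python restatement of the same derivation, handy for spot-checking outputs (function name is illustrative):

    def split_domain(name):
        # Mirrors the suricata.tld script chain: rpartition on the last dot,
        # then split the remainder into subdomain and parent domain.
        result = {}
        if '.' not in name:
            return result
        without_tld, _, tld = name.rpartition('.')
        result['top_level_domain'] = tld
        if '.' in without_tld:
            result['subdomain'], _, result['parent_domain'] = without_tld.rpartition('.')
            result['highest_registered_domain'] = result['parent_domain'] + '.' + tld
            result['subdomain_length'] = len(result['subdomain'])
            result['parent_domain_length'] = len(result['parent_domain'])
        return result

    # e.g. split_domain("www.example.com") -> subdomain 'www', parent 'example',
    # tld 'com', highest_registered_domain 'example.com'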

salt/elasticsearch/files/ingest/zeek.analyzer (new file, 61 lines)
@@ -0,0 +1,61 @@
{
  "description": "zeek.analyzer",
  "processors": [
    {
      "set": {
        "field": "event.dataset",
        "value": "analyzer"
      }
    },
    {
      "remove": {
        "field": [
          "host"
        ],
        "ignore_failure": true
      }
    },
    {
      "json": {
        "field": "message",
        "target_field": "message2",
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "network.protocol",
        "copy_from": "message2.analyzer_name",
        "ignore_empty_value": true,
        "if": "ctx?.message2?.analyzer_kind == 'protocol'"
      }
    },
    {
      "set": {
        "field": "network.protocol",
        "ignore_empty_value": true,
        "if": "ctx?.message2?.analyzer_kind != 'protocol'",
        "copy_from": "message2.proto"
      }
    },
    {
      "lowercase": {
        "field": "network.protocol",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.failure_reason",
        "target_field": "error.reason",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "zeek.common"
      }
    }
  ]
}

@@ -1,35 +1,227 @@
{
  "description" : "zeek.dns",
  "processors" : [
    { "set": { "field": "event.dataset", "value": "dns" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
    { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
    { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
    { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
    { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
    { "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
    { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
    { "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
    { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
    { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
    { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
    { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
    { "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "zeek.common" } }
  "description": "zeek.dns",
  "processors": [
    {
      "set": {
        "field": "event.dataset",
        "value": "dns"
      }
    },
    {
      "remove": {
        "field": [
          "host"
        ],
        "ignore_failure": true
      }
    },
    {
      "json": {
        "field": "message",
        "target_field": "message2",
        "ignore_failure": true
      }
    },
    {
      "dot_expander": {
        "field": "id.orig_h",
        "path": "message2",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.proto",
        "target_field": "network.transport",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.trans_id",
        "target_field": "dns.id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.rtt",
        "target_field": "event.duration",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.query",
        "target_field": "dns.query.name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.qclass",
        "target_field": "dns.query.class",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.qclass_name",
        "target_field": "dns.query.class_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.qtype",
        "target_field": "dns.query.type",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.qtype_name",
        "target_field": "dns.query.type_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.rcode",
        "target_field": "dns.response.code",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.rcode_name",
        "target_field": "dns.response.code_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.AA",
        "target_field": "dns.authoritative",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.TC",
        "target_field": "dns.truncated",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.RD",
        "target_field": "dns.recursion.desired",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.RA",
        "target_field": "dns.recursion.available",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.Z",
        "target_field": "dns.reserved",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.answers",
        "target_field": "dns.answers.name",
        "ignore_missing": true
      }
    },
    {
      "foreach": {
        "field": "dns.answers.name",
        "processor": {
          "pipeline": {
            "name": "common.ip_validation"
          }
        },
        "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
        "ignore_failure": true
      }
    },
    {
      "foreach": {
        "field": "temp._valid_ips",
        "processor": {
          "append": {
            "field": "dns.resolved_ip",
            "allow_duplicates": false,
            "value": "{{{_ingest._value}}}",
            "ignore_failure": true
          }
        },
        "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "field": [
          "temp"
        ],
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.TTLs",
        "target_field": "dns.ttls",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.rejected",
        "target_field": "dns.query.rejected",
        "ignore_missing": true
      }
    },
    {
      "script": {
        "lang": "painless",
        "source": "ctx.dns.query.length = ctx.dns.query.name.length()",
        "ignore_failure": true
      }
    },
    {
      "set": {
        "if": "ctx._index == 'so-zeek'",
        "field": "_index",
        "value": "so-zeek_dns",
        "override": true
      }
    },
    {
      "pipeline": {
        "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
        "name": "dns.tld"
      }
    },
    {
      "pipeline": {
        "name": "zeek.common"
      }
    }
  ]
}

@@ -1,20 +0,0 @@
{
  "description" : "zeek.dpd",
  "processors" : [
    { "set": { "field": "event.dataset", "value": "dpd" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
    { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
@@ -841,6 +841,10 @@
          "type": "long"
        }
      }
    },
    "capture_file": {
      "type": "keyword",
      "ignore_above": 1024
    }
  }
}

@@ -206,7 +206,7 @@ fail() {
  exit 1
}

echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations soley intended to assist with getting a general ILM policy configured."
echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations solely intended to assist with getting a general ILM policy configured."

ORG_ID=$(lookup_org_id)
[ -n "$ORG_ID" ] || fail "Unable to resolve InfluxDB org id"

@@ -756,7 +756,7 @@ if [ "$should_trigger_recommendations" = true ]; then

    ilm_output=$(so-elasticsearch-query "${index}/_ilm/explain" --fail 2>/dev/null) || true
    if [ -n "$ilm_output" ]; then
      policy=$(echo "$ilm_output" | jq --arg idx "$index" -r ".indices[$idx].policy // empty" 2>/dev/null)
      policy=$(echo "$ilm_output" | jq -r '.indices | to_entries | .[0].value.policy // empty' 2>/dev/null)
    fi
    if [ -n "$policy" ] && [ -n "${policy_ages[$policy]:-}" ]; then
      delete_min_age=${policy_ages[$policy]}
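
Reviewer note: the old jq filter placed `$idx` inside double quotes, so the shell (not jq) appears to have expanded it, and the `_ilm/explain` response may also key `.indices` by the concrete backing-index name rather than the name queried. The rewrite sidesteps both by taking the first entry. A Python sketch of the same extraction, assuming `so-elasticsearch-query` is on PATH and emits JSON:

    import json
    import subprocess

    def ilm_policy_for(index):
        # Pull the ILM policy name from _ilm/explain without depending on
        # the exact key under .indices.
        raw = subprocess.run(
            ["so-elasticsearch-query", f"{index}/_ilm/explain", "--fail"],
            capture_output=True, text=True, check=True,
        ).stdout
        indices = json.loads(raw).get("indices", {})
        first = next(iter(indices.values()), {})
        return first.get("policy")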

@@ -1024,9 +1024,13 @@ else
  if [ "$ilm_indices_immediate" -gt 0 ]; then
    echo -e "${BOLD}Deleting now:${NC} $ilm_indices_immediate indices (~${ilm_delete_immediate_gb} GB, $ilm_shards_immediate shards)"
  fi
  if [ "$ilm_indices_7d" -gt 0 ]; then
  if [ "$ilm_indices_30d" -gt 0 ]; then
    if [ "$ilm_delete_scheduled_30d" -gt 0 ] && [ "$ilm_indices_scheduled_30d" -gt 0 ]; then
      echo -e "${BOLD}Storage to be freed (30d):${NC} $ilm_indices_30d indices (~${ilm_delete_30d_gb} GB, $ilm_shards_30d shards)"
    elif [ "$ilm_indices_7d" -gt 0 ]; then
      echo -e "${BOLD}Storage to be freed (7d):${NC} $ilm_indices_7d indices (~${ilm_delete_7d_gb} GB, $ilm_shards_7d shards)"
    fi
  fi

  log_title "LOG" "Retention Projection"
@@ -45,7 +45,7 @@ used during VM provisioning to add dedicated NSM storage volumes.
This command creates and attaches a volume with the following settings:
- VM Name: `vm1_sensor`
- Volume Size: `500` GB
- Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm.img`
- Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm-<epoch_timestamp>.img`
- Device: `/dev/vdb` (virtio-blk)
- VM remains stopped after attachment

@@ -75,7 +75,8 @@ used during VM provisioning to add dedicated NSM storage volumes.

- The script automatically stops the VM if it's running before creating and attaching the volume.
- Volumes are created with full pre-allocation for optimal performance.
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm.img`.
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm-<epoch_timestamp>.img`.
- The epoch timestamp ensures unique volume names and prevents conflicts.
- Volumes are attached as `/dev/vdb` using virtio-blk for high performance.
- The script checks available disk space before creating the volume.
- Ownership is set to `qemu:qemu` with permissions `640`.

@@ -142,6 +143,7 @@ import socket
import subprocess
import pwd
import grp
import time
import xml.etree.ElementTree as ET
from io import StringIO
from so_vm_utils import start_vm, stop_vm

@@ -242,10 +244,13 @@ def create_volume_file(vm_name, size_gb, logger):
    Raises:
        VolumeCreationError: If volume creation fails
    """
    # Define volume path (directory already created in main())
    volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm.img")
    # Generate epoch timestamp for unique volume naming
    epoch_timestamp = int(time.time())

    # Check if volume already exists
    # Define volume path with epoch timestamp for uniqueness
    volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm-{epoch_timestamp}.img")

    # Check if volume already exists (shouldn't be possible with timestamp)
    if os.path.exists(volume_path):
        logger.error(f"VOLUME: Volume already exists: {volume_path}")
        raise VolumeCreationError(f"Volume already exists: {volume_path}")
@@ -274,7 +274,7 @@ check_os_updates() {
    if [[ "$confirm" == [cC] ]]; then
      echo "Continuing without updating packages"
    elif [[ "$confirm" == [uU] ]]; then
      echo "Applying Grid Updates"
      echo "Applying Grid Updates. The following patch.os salt state may take a while depending on how many packages need to be updated."
      update_flag=true
    else
      echo "Exiting soup"

@@ -1499,6 +1499,8 @@ upgrade_salt() {
    fi
  # Else do Ubuntu things
  elif [[ $is_deb ]]; then
    # ensure these files don't exist when upgrading from 3006.9 to 3006.16
    rm -f /etc/apt/keyrings/salt-archive-keyring-2023.pgp /etc/apt/sources.list.d/salt.list
    echo "Removing apt hold for Salt."
    echo ""
    apt-mark unhold "salt-common"
@@ -5,6 +5,7 @@

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}

include:

@@ -57,6 +58,17 @@ so-dockerregistry:
      - x509: registry_crt
      - x509: registry_key

wait_for_so-dockerregistry:
  http.wait_for_successful_query:
    - name: 'https://{{ GLOBALS.registry_host }}:5000/v2/'
    - ssl: True
    - verify_ssl: False
    - status: 200
    - wait_for: 120
    - request_interval: 5
    - require:
      - docker_container: so-dockerregistry

delete_so-dockerregistry_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
@@ -727,7 +727,8 @@ def check_hypervisor_disk_space(hypervisor: str, size_gb: int) -> Tuple[bool, Op
    result = local.cmd(
        hypervisor_minion,
        'cmd.run',
        ["df -BG /nsm/libvirt/volumes | tail -1 | awk '{print $4}' | sed 's/G//'"]
        ["df -BG /nsm/libvirt/volumes | tail -1 | awk '{print $4}' | sed 's/G//'"],
        kwarg={'python_shell': True}
    )

    if not result or hypervisor_minion not in result:
@@ -26,7 +26,7 @@
|
||||
#======================================================================================================================
|
||||
set -o nounset # Treat unset variables as an error
|
||||
|
||||
__ScriptVersion="2025.02.24"
|
||||
__ScriptVersion="2025.09.03"
|
||||
__ScriptName="bootstrap-salt.sh"
|
||||
|
||||
__ScriptFullName="$0"
|
||||
@@ -48,6 +48,7 @@ __ScriptArgs="$*"
|
||||
# * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge
|
||||
# * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to
|
||||
# * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations
|
||||
# * BS_TMP_DIR: The directory to use for executing the installation (defaults to /tmp)
|
||||
#======================================================================================================================
|
||||
|
||||
|
||||
@@ -171,12 +172,12 @@ __check_config_dir() {
|
||||
|
||||
case "$CC_DIR_NAME" in
|
||||
http://*|https://*)
|
||||
__fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}"
|
||||
CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
|
||||
__fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}"
|
||||
CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
|
||||
;;
|
||||
ftp://*)
|
||||
__fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}"
|
||||
CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
|
||||
__fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}"
|
||||
CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
|
||||
;;
|
||||
*://*)
|
||||
echoerror "Unsupported URI scheme for $CC_DIR_NAME"
|
||||
@@ -194,22 +195,22 @@ __check_config_dir() {
|
||||
|
||||
case "$CC_DIR_NAME" in
|
||||
*.tgz|*.tar.gz)
|
||||
tar -zxf "${CC_DIR_NAME}" -C /tmp
|
||||
tar -zxf "${CC_DIR_NAME}" -C ${_TMP_DIR}
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz")
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz")
|
||||
CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
|
||||
CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
|
||||
;;
|
||||
*.tbz|*.tar.bz2)
|
||||
tar -xjf "${CC_DIR_NAME}" -C /tmp
|
||||
tar -xjf "${CC_DIR_NAME}" -C ${_TMP_DIR}
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz")
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2")
|
||||
CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
|
||||
CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
|
||||
;;
|
||||
*.txz|*.tar.xz)
|
||||
tar -xJf "${CC_DIR_NAME}" -C /tmp
|
||||
tar -xJf "${CC_DIR_NAME}" -C ${_TMP_DIR}
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz")
|
||||
CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz")
|
||||
CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
|
||||
CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -245,6 +246,7 @@ __check_unparsed_options() {
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
_KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE}
|
||||
_TEMP_CONFIG_DIR="null"
|
||||
_TMP_DIR=${BS_TMP_DIR:-"/tmp"}
|
||||
_SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git"
|
||||
_SALT_REPO_URL=${_SALTSTACK_REPO_URL}
|
||||
_TEMP_KEYS_DIR="null"
|
||||
@@ -281,7 +283,7 @@ _SIMPLIFY_VERSION=$BS_TRUE
|
||||
_LIBCLOUD_MIN_VERSION="0.14.0"
|
||||
_EXTRA_PACKAGES=""
|
||||
_HTTP_PROXY=""
|
||||
_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt}
|
||||
_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-${_TMP_DIR}/git/salt}
|
||||
_NO_DEPS=$BS_FALSE
|
||||
_FORCE_SHALLOW_CLONE=$BS_FALSE
|
||||
_DISABLE_SSL=$BS_FALSE
|
||||
@@ -367,7 +369,7 @@ __usage() {
|
||||
also be specified. Salt installation will be ommitted, but some of the
|
||||
dependencies could be installed to write configuration with -j or -J.
|
||||
-d Disables checking if Salt services are enabled to start on system boot.
|
||||
You can also do this by touching /tmp/disable_salt_checks on the target
|
||||
You can also do this by touching ${BS_TMP_DIR}/disable_salt_checks on the target
|
||||
host. Default: \${BS_FALSE}
|
||||
-D Show debug output
|
||||
-f Force shallow cloning for git installations.
|
||||
@@ -424,6 +426,9 @@ __usage() {
|
||||
-r Disable all repository configuration performed by this script. This
|
||||
option assumes all necessary repository configuration is already present
|
||||
on the system.
|
||||
-T If set this overrides the use of /tmp for script execution. This is
|
||||
to allow for systems in which noexec is applied to temp filesystem mounts
|
||||
for security reasons
|
||||
-U If set, fully upgrade the system prior to bootstrapping Salt
|
||||
-v Display script version
|
||||
-V Install Salt into virtualenv
|
||||
@@ -436,7 +441,7 @@ __usage() {
|
||||
EOT
|
||||
} # ---------- end of function __usage ----------
|
||||
|
||||
while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt
|
||||
while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:T:aqQ' opt
|
||||
do
|
||||
case "${opt}" in
|
||||
|
||||
@@ -478,6 +483,7 @@ do
|
||||
a ) _PIP_ALL=$BS_TRUE ;;
|
||||
r ) _DISABLE_REPOS=$BS_TRUE ;;
|
||||
R ) _CUSTOM_REPO_URL=$OPTARG ;;
|
||||
T ) _TMP_DIR="$OPTARG" ;;
|
||||
J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;;
|
||||
j ) _CUSTOM_MINION_CONFIG=$OPTARG ;;
|
||||
q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;;
|
||||
@@ -495,10 +501,10 @@ done
|
||||
shift $((OPTIND-1))
|
||||
|
||||
# Define our logging file and pipe paths
|
||||
LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )"
|
||||
LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )"
|
||||
LOGFILE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.log/g )"
|
||||
LOGPIPE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )"
|
||||
# Ensure no residual pipe exists
|
||||
rm "$LOGPIPE" 2>/dev/null
|
||||
rm -f "$LOGPIPE" 2>/dev/null
|
||||
|
||||
# Create our logging pipe
|
||||
# On FreeBSD we have to use mkfifo instead of mknod
|
||||
@@ -534,7 +540,7 @@ exec 2>"$LOGPIPE"
|
||||
# 14 SIGALRM
|
||||
# 15 SIGTERM
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
APT_ERR=$(mktemp /tmp/apt_error.XXXXXX)
|
||||
APT_ERR=$(mktemp ${_TMP_DIR}/apt_error.XXXXXX)
|
||||
__exit_cleanup() {
|
||||
EXIT_CODE=$?
|
||||
|
||||
@@ -927,6 +933,11 @@ if [ -d "${_VIRTUALENV_DIR}" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make sure the designated temp directory exists
|
||||
if [ ! -d "${_TMP_DIR}" ]; then
|
||||
mkdir -p "${_TMP_DIR}"
|
||||
fi
|
||||
|
||||
#--- FUNCTION -------------------------------------------------------------------------------------------------------
|
||||
# NAME: __fetch_url
|
||||
# DESCRIPTION: Retrieves a URL and writes it to a given path
|
||||
@@ -1941,11 +1952,6 @@ __wait_for_apt(){
|
||||
# Timeout set at 15 minutes
|
||||
WAIT_TIMEOUT=900
|
||||
|
||||
## see if sync'ing the clocks helps
|
||||
if [ -f /usr/sbin/hwclock ]; then
|
||||
/usr/sbin/hwclock -s
|
||||
fi
|
||||
|
||||
# Run our passed in apt command
|
||||
"${@}" 2>"$APT_ERR"
|
||||
APT_RETURN=$?
|
||||
@@ -1996,14 +2002,14 @@ __apt_get_upgrade_noinput() {
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
__temp_gpg_pub() {
|
||||
if __check_command_exists mktemp; then
|
||||
tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)"
|
||||
tempfile="$(mktemp ${_TMP_DIR}/salt-gpg-XXXXXXXX.pub 2>/dev/null)"
|
||||
|
||||
if [ -z "$tempfile" ]; then
|
||||
echoerror "Failed to create temporary file in /tmp"
|
||||
echoerror "Failed to create temporary file in ${_TMP_DIR}"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
tempfile="/tmp/salt-gpg-$$.pub"
|
||||
tempfile="${_TMP_DIR}/salt-gpg-$$.pub"
|
||||
fi
|
||||
|
||||
echo $tempfile
|
||||
@@ -2043,7 +2049,7 @@ __rpm_import_gpg() {
|
||||
__fetch_url "$tempfile" "$url" || return 1
|
||||
|
||||
# At least on CentOS 8, a missing newline at the end causes:
|
||||
# error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key.
|
||||
# error: ${_TMP_DIR}/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key.
|
||||
# shellcheck disable=SC1003,SC2086
|
||||
sed -i -e '$a\' $tempfile
|
||||
|
||||
@@ -2109,7 +2115,7 @@ __git_clone_and_checkout() {
|
||||
fi
|
||||
|
||||
__SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)
|
||||
__SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}"
|
||||
__SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-${_TMP_DIR}/git}"
|
||||
__SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)"
|
||||
__SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}"
|
||||
[ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}"
|
||||
@@ -2390,14 +2396,14 @@ __overwriteconfig() {

# Make a tempfile to dump any python errors into.
if __check_command_exists mktemp; then
tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)"
tempfile="$(mktemp ${_TMP_DIR}/salt-config-XXXXXXXX 2>/dev/null)"

if [ -z "$tempfile" ]; then
echoerror "Failed to create temporary file in /tmp"
echoerror "Failed to create temporary file in ${_TMP_DIR}"
return 1
fi
else
tempfile="/tmp/salt-config-$$"
tempfile="${_TMP_DIR}/salt-config-$$"
fi

if [ -n "$_PY_EXE" ]; then
@@ -2760,8 +2766,8 @@ __install_salt_from_repo() {
echoinfo "Installing salt using ${_py_exe}, $(${_py_exe} --version)"
cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1

mkdir -p /tmp/git/deps
echodebug "Created directory /tmp/git/deps"
mkdir -p ${_TMP_DIR}/git/deps
echodebug "Created directory ${_TMP_DIR}/git/deps"

if [ ${DISTRO_NAME_L} = "ubuntu" ] && [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then
echodebug "Ubuntu 22.04 has problem with base.txt requirements file, not parsing sys_platform == 'win32', upgrading from default pip works"
@@ -2774,7 +2780,7 @@ __install_salt_from_repo() {
fi
fi

rm -f /tmp/git/deps/*
rm -f ${_TMP_DIR}/git/deps/*

echodebug "Installing Salt requirements from PyPi, ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r requirements/static/ci/py${_py_version}/linux.txt"
${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r "requirements/static/ci/py${_py_version}/linux.txt"
@@ -2799,7 +2805,7 @@ __install_salt_from_repo() {

echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'"
${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" bdist_wheel || return 1
mv dist/salt*.whl /tmp/git/deps/ || return 1
mv dist/salt*.whl ${_TMP_DIR}/git/deps/ || return 1

cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1

@@ -2813,14 +2819,14 @@ __install_salt_from_repo() {
${_pip_cmd} install --force-reinstall --break-system-packages "${_arch_dep}"
fi

echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'"
echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl'"

echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} /tmp/git/deps/salt*.whl"
echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"

${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
${_PIP_INSTALL_ARGS} \
--global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
/tmp/git/deps/salt*.whl || return 1
${_TMP_DIR}/git/deps/salt*.whl || return 1

echoinfo "Checking if Salt can be imported using ${_py_exe}"
CHECK_SALT_SCRIPT=$(cat << EOM
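
Stripped of the script's variables, the build-then-install flow above reduces to two steps: build a wheel from the checkout, then force pip to install exactly that artifact without touching its dependencies (those were already installed from the pinned requirements file). A hedged sketch, assuming a plain python3/pip3 on PATH:

    python3 setup.py bdist_wheel                           # produces dist/salt-*.whl
    pip3 install --no-deps --force-reinstall dist/salt-*.whl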
@@ -6295,8 +6301,8 @@ __get_packagesite_onedir_latest() {
}


__install_saltstack_photon_onedir_repository() {
echodebug "__install_saltstack_photon_onedir_repository() entry"
__install_saltstack_vmware_photon_os_onedir_repository() {
echodebug "__install_saltstack_vmware_photon_os_onedir_repository() entry"

if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
echoerror "Python version is no longer supported, only Python 3"
@@ -6376,8 +6382,8 @@ __install_saltstack_photon_onedir_repository() {
return 0
}

install_photon_deps() {
echodebug "install_photon_deps() entry"
install_vmware_photon_os_deps() {
echodebug "install_vmware_photon_os_deps() entry"

if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
echoerror "Python version is no longer supported, only Python 3"
@@ -6406,8 +6412,8 @@ install_photon_deps() {
return 0
}

install_photon_stable_post() {
echodebug "install_photon_stable_post() entry"
install_vmware_photon_os_stable_post() {
echodebug "install_vmware_photon_os_stable_post() entry"

for fname in api master minion syndic; do
# Skip salt-api since the service should be opt-in and not necessarily started on boot
@@ -6424,8 +6430,8 @@ install_photon_stable_post() {
done
}

install_photon_git_deps() {
echodebug "install_photon_git_deps() entry"
install_vmware_photon_os_git_deps() {
echodebug "install_vmware_photon_os_git_deps() entry"

if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
echoerror "Python version is no longer supported, only Python 3"
@@ -6463,7 +6469,7 @@ install_photon_git_deps() {

__PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64 cython${PY_PKG_VER}"

echodebug "install_photon_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}"
echodebug "install_vmware_photon_os_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}"

## Photon 5 container is missing systemd on default installation
if [ "${DISTRO_MAJOR_VERSION}" -lt 5 ]; then
@@ -6489,8 +6495,8 @@ install_photon_git_deps() {
return 0
}

install_photon_git() {
echodebug "install_photon_git() entry"
install_vmware_photon_os_git() {
echodebug "install_vmware_photon_os_git() entry"

if [ "${_PY_EXE}" != "" ]; then
_PYEXE=${_PY_EXE}
@@ -6500,7 +6506,7 @@ install_photon_git() {
return 1
fi

install_photon_git_deps
install_vmware_photon_os_git_deps

if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1
@@ -6510,8 +6516,8 @@ install_photon_git() {
return 0
}

install_photon_git_post() {
echodebug "install_photon_git_post() entry"
install_vmware_photon_os_git_post() {
echodebug "install_vmware_photon_os_git_post() entry"

for fname in api master minion syndic; do
# Skip if not meant to be installed
@@ -6543,9 +6549,9 @@ install_photon_git_post() {
done
}

install_photon_restart_daemons() {
install_vmware_photon_os_restart_daemons() {
[ "$_START_DAEMONS" -eq $BS_FALSE ] && return
echodebug "install_photon_restart_daemons() entry"
echodebug "install_vmware_photon_os_restart_daemons() entry"


for fname in api master minion syndic; do
@@ -6567,8 +6573,8 @@ install_photon_restart_daemons() {
done
}

install_photon_check_services() {
echodebug "install_photon_check_services() entry"
install_vmware_photon_os_check_services() {
echodebug "install_vmware_photon_os_check_services() entry"

for fname in api master minion syndic; do
# Skip salt-api since the service should be opt-in and not necessarily started on boot
@@ -6585,8 +6591,8 @@ install_photon_check_services() {
return 0
}

install_photon_onedir_deps() {
echodebug "install_photon_onedir_deps() entry"
install_vmware_photon_os_onedir_deps() {
echodebug "install_vmware_photon_os_onedir_deps() entry"


if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
@@ -6600,17 +6606,17 @@ install_photon_onedir_deps() {
fi

if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then
__install_saltstack_photon_onedir_repository || return 1
__install_saltstack_vmware_photon_os_onedir_repository || return 1
fi

# If -R was passed, we need to configure a custom repo url with rsync-ed packages.
# This was handled in __install_saltstack_rhel_repository, but that handled old-stable, which is for
# releases which are End-Of-Life. This call has its own check in case -r was passed without -R.
if [ "$_CUSTOM_REPO_URL" != "null" ]; then
__install_saltstack_photon_onedir_repository || return 1
__install_saltstack_vmware_photon_os_onedir_repository || return 1
fi

__PACKAGES="procps-ng sudo shadow"
|
||||
__PACKAGES="procps-ng sudo shadow wget"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
__tdnf_install_noinput ${__PACKAGES} || return 1
|
||||
@@ -6626,9 +6632,9 @@ install_photon_onedir_deps() {
|
||||
}
|
||||
|
||||
|
||||
install_photon_onedir() {
|
||||
install_vmware_photon_os_onedir() {
|
||||
|
||||
echodebug "install_photon_onedir() entry"
|
||||
echodebug "install_vmware_photon_os_onedir() entry"
|
||||
|
||||
STABLE_REV=$ONEDIR_REV
|
||||
_GENERIC_PKG_VERSION=""
|
||||
@@ -6672,9 +6678,9 @@ install_photon_onedir() {
|
||||
return 0
|
||||
}
|
||||
|
||||
install_photon_onedir_post() {
|
||||
install_vmware_photon_os_onedir_post() {
|
||||
STABLE_REV=$ONEDIR_REV
|
||||
install_photon_stable_post || return 1
|
||||
install_vmware_photon_os_stable_post || return 1
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -7797,7 +7803,7 @@ install_macosx_git_deps() {
|
||||
export PATH=/usr/local/bin:$PATH
|
||||
fi
|
||||
|
||||
__fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1
|
||||
__fetch_url "${_TMP_DIR}/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1
|
||||
|
||||
if [ -n "$_PY_EXE" ]; then
|
||||
_PYEXE="${_PY_EXE}"
|
||||
@@ -7807,7 +7813,7 @@ install_macosx_git_deps() {
|
||||
fi
|
||||
|
||||
# Install PIP
|
||||
$_PYEXE /tmp/get-pip.py || return 1
|
||||
$_PYEXE ${_TMP_DIR}/get-pip.py || return 1
|
||||
|
||||
# shellcheck disable=SC2119
|
||||
__git_clone_and_checkout || return 1
|
||||
@@ -7819,9 +7825,9 @@ install_macosx_stable() {
|
||||
|
||||
install_macosx_stable_deps || return 1
|
||||
|
||||
__fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1
|
||||
__fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1
|
||||
|
||||
/usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1
|
||||
/usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1
|
||||
|
||||
return 0
|
||||
}
|
||||
@@ -7830,9 +7836,9 @@ install_macosx_onedir() {
|
||||
|
||||
install_macosx_onedir_deps || return 1
|
||||
|
||||
__fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1
|
||||
__fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1
|
||||
|
||||
/usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1
|
||||
/usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -1364,6 +1364,8 @@ soc:
cases: soc
filedatastore:
jobDir: jobs
retryFailureIntervalMs: 600000
retryFailureMaxAttempts: 5
kratos:
hostUrl:
hydra:
@@ -1838,7 +1840,7 @@ soc:
showSubtitle: true
- name: DPD
description: Dynamic Protocol Detection errors
query: 'tags:dpd | groupby error.reason'
query: '(tags:dpd OR tags:analyzer) | groupby error.reason'
showSubtitle: true
- name: Files
description: Files grouped by mimetype
@@ -2104,7 +2106,7 @@ soc:
query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination.as.organization.name'
- name: DPD
description: DPD (Dynamic Protocol Detection) errors
query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name'
query: '(tags:dpd OR tags:analyzer) | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name'
- name: Files
description: Files seen in network traffic
query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination.as.organization.name'
@@ -2646,6 +2648,7 @@ soc:
assistant:
enabled: false
investigationPrompt: Investigate Alert ID {socId}
compressContextPrompt: Summarize the conversation for context compaction
thresholdColorRatioLow: 0.5
thresholdColorRatioMed: 0.75
thresholdColorRatioMax: 1
@@ -2655,18 +2658,22 @@ soc:
contextLimitSmall: 200000
contextLimitLarge: 1000000
lowBalanceColorAlert: 500000
enabled: true
- id: sonnet-4.5
displayName: Claude Sonnet 4.5
contextLimitSmall: 200000
contextLimitLarge: 1000000
lowBalanceColorAlert: 500000
enabled: true
- id: gptoss-120b
displayName: GPT-OSS 120B
contextLimitSmall: 128000
contextLimitLarge: 128000
lowBalanceColorAlert: 500000
enabled: true
- id: qwen-235b
displayName: QWEN 235B
contextLimitSmall: 256000
contextLimitLarge: 256000
lowBalanceColorAlert: 500000
enabled: true

@@ -43,10 +43,26 @@

No Virtual Machines Found
{%- endif %}
{%- else %}
{%- elif baseDomainStatus == 'ImageDownloadStart' %}
#### INFO

Base domain image download started.
{%- elif baseDomainStatus == 'ImageDownloadFailed' %}
#### ERROR

Base domain image download failed. Please check the salt-master log for details and verify network connectivity.
{%- elif baseDomainStatus == 'SSHKeySetupFailed' %}
#### ERROR

SSH key setup failed. Please check the salt-master log for details.
{%- elif baseDomainStatus == 'SetupFailed' %}
#### WARNING

Base domain has not been initialized.
Setup failed. Please check the salt-master log for details.
{%- elif baseDomainStatus == 'PreInit' %}
#### WARNING

Base domain has not been initialized. Waiting for hypervisor to highstate.
{%- endif %}
{%- endmacro -%}


@@ -424,6 +424,17 @@ soc:
description: The maximum number of documents to request in a single Elasticsearch scroll request.
bulkIndexWorkerCount:
description: The number of worker threads to use when bulk indexing data into Elasticsearch. A value below 1 will default to the number of CPUs available.
filedatastore:
jobDir:
description: The location where local job files are stored on the manager.
global: True
advanced: True
retryFailureIntervalMs:
description: The interval, in milliseconds, to wait before attempting to reprocess a failed job.
global: True
retryFailureMaxAttempts:
description: The max number of attempts to process a job, in the event the job fails to complete.
global: True
sostatus:
refreshIntervalMs:
description: Duration (in milliseconds) between refreshes of the grid status. Shortening this duration may not have expected results, as the backend systems feeding this sostatus data will continue their updates as scheduled.
@@ -652,6 +663,9 @@ soc:
investigationPrompt:
description: Prompt given to Onion AI when beginning an investigation.
global: True
compressContextPrompt:
description: Prompt given to Onion AI when summarizing a conversation in order to compress context.
global: True
thresholdColorRatioLow:
description: Lower visual context color change threshold.
global: True
@@ -694,6 +708,9 @@ soc:
label: Low Balance Color Alert
forcedType: int
required: True
- field: enabled
label: Enabled
forcedType: bool
apiTimeoutMs:
description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
global: True

@@ -29,7 +29,7 @@ suricata:
#custom: [Accept-Encoding, Accept-Language, Authorization]
# dump-all-headers: none
- dns:
version: 2
version: 3
enabled: "yes"
#requests: "no"
#responses: "no"

@@ -45,7 +45,7 @@ zeek:
- protocols/ssh/geo-data
- protocols/ssh/detect-bruteforcing
- protocols/ssh/interesting-hostnames
- protocols/http/detect-sqli
- protocols/http/detect-sql-injection
- frameworks/files/hash-all-files
- frameworks/files/detect-MHR
- policy/frameworks/notice/extend-email/hostnames

@@ -502,6 +502,7 @@ configure_minion() {
minion_type=desktop
fi
info "Configuring minion type as $minion_type"
logCmd "mkdir -p /etc/salt/minion.d"
echo "role: so-$minion_type" > /etc/salt/grains

local minion_config=/etc/salt/minion
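
Writing role: so-$minion_type to /etc/salt/grains is what later lets states target by role; once the minion is installed, the grain can be read back locally. An assumed illustration (the role value here is hypothetical):

    echo "role: so-sensor" > /etc/salt/grains
    salt-call --local grains.get role    # -> so-sensor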
@@ -541,20 +542,6 @@ configure_minion() {
"log_file: /opt/so/log/salt/minion"\
"#startup_states: highstate" >> "$minion_config"

# At the time the so-managerhype node does not yet have the bridge configured.
# The so-hypervisor node doesn't either, but it doesn't cause issues here.
local usebr0=false
if [ "$minion_type" == 'hypervisor' ]; then
usebr0=true
fi
local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"

{
logCmd "systemctl enable salt-minion";
logCmd "systemctl restart salt-minion";
} >> "$setup_log" 2>&1
}

checkin_at_boot() {
@@ -729,7 +716,7 @@ configure_network_sensor() {
fi

# Create the bond interface only if it doesn't already exist
nmcli -f name,uuid -p con | grep -q '$INTERFACE'
nmcli -f name,uuid -p con | grep -q "$INTERFACE"
local found_int=$?

if [[ $found_int != 0 ]]; then
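
The quoting change above is the whole bug fix: inside single quotes, $INTERFACE is a literal string, so the grep could never match a real connection name. A standalone demonstration:

    INTERFACE=bond0
    echo 'text mentioning $INTERFACE' | grep -q '$INTERFACE' && echo "matched the literal string"
    echo "bond0 ethernet" | grep -q "$INTERFACE" && echo "matched the expanded value"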
@@ -798,25 +785,18 @@ configure_hyper_bridge() {
}

copy_salt_master_config() {

logCmd "mkdir /etc/salt"
title "Copy the Salt master config template to the proper directory"
if [ "$setup_type" = 'iso' ]; then
logCmd "cp /root/SecurityOnion/files/salt/master/master /etc/salt/master"
#logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
else
logCmd "cp ../files/salt/master/master /etc/salt/master"
#logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
fi
info "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
logCmd "cp -R $temp_install_dir/pillar/ $local_salt_dir/"
if [ -d "$temp_install_dir"/salt ] ; then
logCmd "cp -R $temp_install_dir/salt/ $local_salt_dir/"
fi

# Restart the service so it picks up the changes
logCmd "systemctl daemon-reload"
logCmd "systemctl enable salt-master"
logCmd "systemctl restart salt-master"
}

create_local_nids_rules() {
@@ -1935,11 +1915,12 @@ repo_sync_local() {
}

saltify() {
info "Installing Salt"
SALTVERSION=$(grep "version:" ../salt/salt/master.defaults.yaml | grep -o "[0-9]\+\.[0-9]\+")
info "Installing Salt $SALTVERSION"
chmod u+x ../salt/salt/scripts/bootstrap-salt.sh
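
SALTVERSION is scraped from the pinned version in master.defaults.yaml rather than hardcoded. The extraction can be tested in isolation (the file contents here are hypothetical):

    echo "  version: 3006.9" > /tmp/master.defaults.yaml
    SALTVERSION=$(grep "version:" /tmp/master.defaults.yaml | grep -o "[0-9]\+\.[0-9]\+")
    echo "$SALTVERSION"    # -> 3006.9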
if [[ $is_deb ]]; then

DEBIAN_FRONTEND=noninteractive retry 150 20 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup
DEBIAN_FRONTEND=noninteractive retry 30 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup
if [ $OSVER == "focal" ]; then update-alternatives --install /usr/bin/python python /usr/bin/python3.10 10; fi
local pkg_arr=(
'apache2-utils'
@@ -1952,16 +1933,11 @@ saltify() {
'jq'
'gnupg'
)
retry 150 20 "apt-get -y install ${pkg_arr[*]}" || fail_setup
retry 30 10 "apt-get -y install ${pkg_arr[*]}" || fail_setup

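The retry <attempts> <delay> "<command>" calls above rely on a helper defined elsewhere in the setup scripts; the change from retry 150 20 to retry 30 10 shortens the worst-case wait from roughly 50 minutes of sleep to 5. A hypothetical sketch of such a helper (the real one also accepts extra arguments for matching command output, which this sketch omits):

    retry() {
        local attempts=$1 delay=$2 cmd=$3 i
        for ((i = 1; i <= attempts; i++)); do
            eval "$cmd" && return 0    # success: stop retrying
            sleep "$delay"
        done
        return 1
    }
    # Usage: retry 30 10 "apt-get update"
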
logCmd "mkdir -vp /etc/apt/keyrings"
|
||||
logCmd "wget -q --inet4-only -O /etc/apt/keyrings/docker.pub https://download.docker.com/linux/ubuntu/gpg"
|
||||
|
||||
# Download public key
|
||||
logCmd "curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023.pgp https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public"
|
||||
# Create apt repo target configuration
|
||||
echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" | sudo tee /etc/apt/sources.list.d/salt.list
|
||||
|
||||
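Two trust models are in play here: the signed-by= option in the sources entry scopes the Salt key to that one repository, whereas the legacy apt-key add calls further down grant a key global trust (apt-key is deprecated on current Debian and Ubuntu releases). The scoped pattern in general form, with placeholder key and repo URLs:

    sudo mkdir -p /etc/apt/keyrings
    sudo curl -fsSL -o /etc/apt/keyrings/example.pgp https://example.com/repo-key.pub
    echo "deb [signed-by=/etc/apt/keyrings/example.pgp] https://example.com/apt stable main" \
        | sudo tee /etc/apt/sources.list.d/example.list
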
if [[ $is_ubuntu ]]; then
# Add Docker Repo
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
@@ -1972,45 +1948,50 @@ saltify() {
echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $OSVER stable" > /etc/apt/sources.list.d/docker.list
fi

logCmd "apt-key add /etc/apt/keyrings/salt-archive-keyring-2023.pgp"

#logCmd "apt-key add /opt/so/gpg/SALTSTACK-GPG-KEY.pub"
logCmd "apt-key add /etc/apt/keyrings/docker.pub"

# Add SO Saltstack Repo
#echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt3004.2/ focal main" > /etc/apt/sources.list.d/saltstack.list

# Ain't nothing but a GPG

retry 150 20 "apt-get update" "" "Err:" || fail_setup
retry 30 10 "apt-get update" "" "Err:" || fail_setup
if [[ $waitforstate ]]; then
retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION salt-master=$SALTVERSION" || fail_setup
retry 150 20 "apt-mark hold salt-minion salt-common salt-master" || fail_setup
retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1
retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -M -X stable $SALTVERSION" || fail_setup
retry 30 10 "apt-mark hold salt-minion salt-common salt-master" || fail_setup
retry 30 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1
else
retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION" || fail_setup
retry 150 20 "apt-mark hold salt-minion salt-common" || fail_setup
retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -X stable $SALTVERSION" || fail_setup
retry 30 10 "apt-mark hold salt-minion salt-common" || fail_setup
fi
fi

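The bootstrap-salt.sh flags used above are upstream salt-bootstrap options: -M also installs salt-master, -X suppresses starting the daemons after install, and (in the rpm branch below) -r disables the script's own repository configuration. If the vendored copy differs, its usage text is authoritative:

    bash ../salt/salt/scripts/bootstrap-salt.sh -h    # print supported flags for this vendored version
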
if [[ $is_rpm ]]; then
if [[ $waitforstate ]]; then
# install all for a manager
logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION"
retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r -M -X stable $SALTVERSION" || fail_setup
else
# We just need the minion
if [[ $is_airgap ]]; then
logCmd "dnf -y install salt salt-minion"
else
logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION"
fi
# just a minion
retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r -X stable $SALTVERSION" || fail_setup
fi
fi

logCmd "mkdir -p /etc/salt/minion.d"
salt_install_module_deps
salt_patch_x509_v2

# At the time the so-managerhype node does not yet have the bridge configured.
# The so-hypervisor node doesn't either, but it doesn't cause issues here.
local usebr0=false
if [ "$minion_type" == 'hypervisor' ]; then
usebr0=true
fi
local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"

if [[ $waitforstate ]]; then
logCmd "systemctl enable salt-master";
logCmd "systemctl start salt-master";
fi

logCmd "systemctl enable salt-minion";
logCmd "systemctl restart salt-minion";

}

salt_install_module_deps() {

@@ -745,13 +745,12 @@ if ! [[ -f $install_opt_file ]]; then
securityonion_repo
# Update existing packages
update_packages
# Install salt
saltify
# Start the master service
# Put salt-master config in place
copy_salt_master_config
configure_minion "$minion_type"
# Install salt
saltify
check_sos_appliance

logCmd "salt-key -yd $MINION_ID"
sleep 2 # Debug RSA Key format errors
logCmd "salt-call state.show_top"
@@ -852,8 +851,8 @@ if ! [[ -f $install_opt_file ]]; then
gpg_rpm_import
securityonion_repo
update_packages
saltify
configure_minion "$minion_type"
saltify
check_sos_appliance
drop_install_options
hypervisor_local_states