Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka

Author: reyesj2
Date:   2024-05-02 15:12:27 -04:00
53 changed files with 1317 additions and 440 deletions

File diff suppressed because it is too large.


@@ -200,9 +200,15 @@ so-elasticsearch-roles-load:
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
so-elasticsearch-indices-delete:
cron.present:
cron.{{ap}}:
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
- identifier: so-elasticsearch-indices-delete
- user: root
@@ -211,7 +217,8 @@ so-elasticsearch-indices-delete:
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
{% endif %}
{% endif %}
{% else %}
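For reference, a minimal sketch of how the new conditional renders once Jinja is evaluated with ELASTICSEARCHMERGED.index_clean set to false (abbreviated; the schedule fields are omitted here):

so-elasticsearch-indices-delete:
  cron.absent:
    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
    - identifier: so-elasticsearch-indices-delete
    - user: root

With index_clean true, the same block renders as cron.present and the cron job is installed as before.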


@@ -80,7 +80,7 @@
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },


@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "network.wireless.bssid"
}
}
]
}
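This and the following pipelines map raw Kismet JSON onto ECS-style wireless fields. An illustrative before/after for the rename above (MAC address invented):

before: { "message2": { "kismet_device_base_macaddr": "AA:BB:CC:DD:EE:FF" } }
after:  { "network": { "wireless": { "bssid": "AA:BB:CC:DD:EE:FF" } } }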


@@ -0,0 +1,50 @@
{
"processors": [
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked",
"target_field": "network.wireless.ssid_cloaked",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid",
"target_field": "network.wireless.ssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null"
}
},
{
"set": {
"field": "network.wireless.ssid",
"value": "Hidden",
"if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc",
"target_field": "network.wireless.channel_utilization",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}
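Kismet keys dot11_device_associated_client_map by client MAC address, so the foreach/append pair above flattens the map keys ({{_ingest._key}}) into a plain array. A hypothetical input and result:

input:  "dot11_device_associated_client_map": { "AA:BB:CC:11:22:33": { }, "AA:BB:CC:44:55:66": { } }
output: "network.wireless.associated_clients": ["AA:BB:CC:11:22:33", "AA:BB:CC:44:55:66"]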


@@ -0,0 +1,16 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
}
]
}


@@ -0,0 +1,29 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.last_connected_bssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_client_map",
"processor": {
"append": {
"field": "network.wireless.known_connected_bssid",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null"
}
}
]
}


@@ -0,0 +1,159 @@
{
"processors": [
{
"json": {
"field": "message",
"target_field": "message2"
}
},
{
"date": {
"field": "message2.kismet_device_base_mod_time",
"formats": [
"epoch_second"
],
"target_field": "@timestamp"
}
},
{
"set": {
"field": "event.category",
"value": "network"
}
},
{
"dissect": {
"field": "message2.kismet_device_base_type",
"pattern": "%{wifi} %{device_type}"
}
},
{
"lowercase": {
"field": "device_type"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.{{device_type}}"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.wds_ap",
"if": "ctx?.device_type == 'wds ap'"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.ad_hoc",
"if": "ctx?.device_type == 'ad-hoc'"
}
},
{
"set": {
"field": "event.module",
"value": "kismet"
}
},
{
"rename": {
"field": "message2.kismet_device_base_packets_tx_total",
"target_field": "source.packets"
}
},
{
"rename": {
"field": "message2.kismet_device_base_num_alerts",
"target_field": "kismet.alerts.count"
}
},
{
"rename": {
"field": "message2.kismet_device_base_channel",
"target_field": "network.wireless.channel",
"if": "ctx?.message2?.kismet_device_base_channel != ''"
}
},
{
"rename": {
"field": "message2.kismet_device_base_frequency",
"target_field": "network.wireless.frequency",
"if": "ctx?.message2?.kismet_device_base_frequency != 0"
}
},
{
"rename": {
"field": "message2.kismet_device_base_last_time",
"target_field": "kismet.last_seen"
}
},
{
"date": {
"field": "kismet.last_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.last_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_first_time",
"target_field": "kismet.first_seen"
}
},
{
"date": {
"field": "kismet.first_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.first_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_seenby",
"target_field": "kismet.seenby"
}
},
{
"foreach": {
"field": "kismet.seenby",
"processor": {
"pipeline": {
"name": "kismet.seenby"
}
}
}
},
{
"rename": {
"field": "message2.kismet_device_base_manuf",
"target_field": "device.manufacturer"
}
},
{
"pipeline": {
"name": "{{event.dataset}}"
}
},
{
"remove": {
"field": [
"message2",
"message",
"device_type",
"wifi",
"agent",
"host",
"event.created"
],
"ignore_failure": true
}
}
]
}
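The routing in this parent pipeline works as follows: dissect splits kismet_device_base_type on the first space, lowercase normalizes the device type, and the trailing pipeline processor dispatches to the sub-pipeline named by event.dataset; the two extra set processors special-case "wds ap" and "ad-hoc", presumably because those values would otherwise yield dataset names containing a space or hyphen. A hypothetical trace for one device:

kismet_device_base_type: "Wi-Fi AP"
-> dissect:   wifi = "Wi-Fi", device_type = "AP"
-> lowercase: device_type = "ap"
-> set:       event.dataset = "kismet.ap"
-> pipeline:  runs ingest pipeline "kismet.ap"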


@@ -0,0 +1,9 @@
{
"processors": [
{
"pipeline": {
"name": "kismet.client"
}
}
]
}


@@ -0,0 +1,52 @@
{
"processors": [
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_num_packets",
"target_field": "_ingest._value.packets_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_uuid",
"target_field": "_ingest._value.serial_number",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_first_time",
"target_field": "_ingest._value.first_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_last_time",
"target_field": "_ingest._value.last_seen",
"ignore_missing": true
}
},
{
"date": {
"field": "_ingest._value.first_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.first_seen",
"ignore_failure": true
}
},
{
"date": {
"field": "_ingest._value.last_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.last_seen",
"ignore_failure": true
}
}
]
}
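This pipeline is invoked once per element by the foreach processor in the parent pipeline, which is why every field is addressed through _ingest._value. A hypothetical seenby element before and after (values invented; epoch seconds render as ISO 8601):

before: { "kismet_common_seenby_uuid": "5FE308BD-0000-0000-0000-4B45544A0001",
          "kismet_common_seenby_num_packets": 42,
          "kismet_common_seenby_first_time": 1714671147,
          "kismet_common_seenby_last_time": 1714674747 }
after:  { "serial_number": "5FE308BD-0000-0000-0000-4B45544A0001",
          "packets_seen": 42,
          "first_seen": "2024-05-02T17:32:27.000Z",
          "last_seen": "2024-05-02T18:32:27.000Z" }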


@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
}
]
}


@@ -0,0 +1,22 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_commonname",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}


@@ -27,7 +27,8 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata"
"view_index_metadata",
"write"
]
}
],


@@ -13,7 +13,8 @@
"monitor",
"read",
"read_cross_cluster",
"view_index_metadata"
"view_index_metadata",
"write"
]
}
],


@@ -5,6 +5,10 @@ elasticsearch:
esheap:
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
helpLink: elasticsearch.html
index_clean:
description: Determines whether indices should be considered for deletion based on available disk space in the cluster. Otherwise, indices are deleted only by the age defined in the ILM settings.
forcedType: bool
helpLink: elasticsearch.html
retention:
retention_pct:
description: Total percentage of space used by Elasticsearch for multi-node clusters
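With the new index_clean option, disk-based deletion can be disabled per deployment. A hypothetical minimal override, assuming the configuration keys mirror the annotation hierarchy shown in this file:

elasticsearch:
  index_clean: false  # rely on ILM age-based deletion only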
@@ -98,10 +102,6 @@ elasticsearch:
policy:
phases:
hot:
max_age:
description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
@@ -120,7 +120,9 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
helpLink: elasticsearch.html
actions:
@@ -131,8 +133,8 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^\[0-9\]{1,5}d$
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
@@ -145,6 +147,8 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
@@ -271,7 +275,9 @@ elasticsearch:
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -296,7 +302,9 @@ elasticsearch:
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -311,6 +319,8 @@ elasticsearch:
delete:
min_age:
description: Minimum age of index. This determines when the index should be deleted.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
advanced: True
helpLink: elasticsearch.html
@@ -512,6 +522,7 @@ elasticsearch:
so-suricata: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
so-logstash: *indexSettings
so-redis: *indexSettings
so-strelka: *indexSettings


@@ -0,0 +1,36 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"device": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"manufacturer": {
"ignore_above": 1024,
"type": "keyword"
},
"model": {
"properties": {
"identifier": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
}
}
}
}


@@ -0,0 +1,32 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"kismet": {
"properties": {
"alerts": {
"properties": {
"count": {
"type": "long"
}
}
},
"first_seen": {
"type": "date"
},
"last_seen": {
"type": "date"
},
"seenby": {
"type": "nested"
}
}
}
}
}
}
}


@@ -77,6 +77,43 @@
"type": "keyword"
}
}
},
"wireless": {
"properties": {
"associated_clients": {
"ignore_above": 1024,
"type": "keyword"
},
"bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"channel": {
"ignore_above": 1024,
"type": "keyword"
},
"channel_utilization": {
"type": "float"
},
"frequency": {
"type": "double"
},
"ssid": {
"ignore_above": 1024,
"type": "keyword"
},
"ssid_cloaked": {
"type": "integer"
},
"known_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"last_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}


@@ -20,10 +20,12 @@
"so_detection": {
"properties": {
"publicId": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"title": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"severity": {
"ignore_above": 1024,
@@ -36,6 +38,18 @@
"description": {
"type": "text"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"product": {
"ignore_above": 1024,
"type": "keyword"
},
"service": {
"ignore_above": 1024,
"type": "keyword"
},
"content": {
"type": "text"
},
@@ -49,7 +63,8 @@
"type": "boolean"
},
"tags": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"ruleset": {
"ignore_above": 1024,
@@ -136,4 +151,4 @@
"_meta": {
"ecs_version": "1.12.2"
}
}
}
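Re-typing publicId, title, and tags from text to keyword enables exact term matches and aggregations on those fields. A hypothetical search body illustrating the kind of lookup this change supports (value invented):

{ "query": { "term": { "so_detection.publicId": "2100498" } } }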


@@ -40,7 +40,7 @@ fi
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
{% else %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}
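The added "$" anchor matters because grep -v drops any line containing the pattern anywhere in it; since the node name is the last column of _cat/allocation output, anchoring to end-of-line excludes only the manager itself. A contrived illustration (hostnames invented):

printf 'so-manager\nso-manager-search\n' | grep -v 'so-manager'   # drops both lines
printf 'so-manager\nso-manager-search\n' | grep -v 'so-manager$'  # keeps so-manager-search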


@@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do


@@ -27,6 +27,7 @@ overlimit() {
# 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.
while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
# If we can't query Elasticsearch, then immediately return false.
@@ -34,28 +35,36 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
# We iterate through the closed and open indices
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Check if index is an older index. If it is an older index, delete it before moving on to newer indices.
if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
else
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1
fi
fi
if ! overlimit ; then
exit
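The else branch above depends on the grep extraction resolving a backing index to its data stream. A walk-through with a hypothetical backing-index name:

INDEX=".ds-logs-system.syslog-so-2024.05.01-000001"  # hypothetical backing index
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN"     # prints: logs-system.syslog-so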