Merge remote-tracking branch 'remotes/origin/2.4/dev' into fleet-sa
.github/.gitleaks.toml (vendored, 2 lines changed)
@@ -536,7 +536,7 @@ secretGroup = 4
 
 [allowlist]
 description = "global allow lists"
-regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''']
+regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''']
 paths = [
     '''gitleaks.toml''',
     '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
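A quick way to exercise the widened allowlist (a sketch; assumes the gitleaks v8 CLI is installed and is run from the repository root):

    # RPM-GPG-KEY* material should no longer be flagged as a secret
    gitleaks detect --source . --config .github/.gitleaks.toml
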
@@ -1,6 +1,6 @@
 {% import_yaml 'firewall/ports/ports.yaml' as default_portgroups %}
 {% set default_portgroups = default_portgroups.firewall.ports %}
-{% import_yaml 'firewall/portgroups.local.yaml' as local_portgroups %}
+{% import_yaml 'firewall/ports/ports.local.yaml' as local_portgroups %}
 {% if local_portgroups.firewall.ports %}
 {% set local_portgroups = local_portgroups.firewall.ports %}
 {% else %}
@@ -1,66 +0,0 @@
-firewall:
-  hostgroups:
-    analyst:
-      ips:
-        delete:
-        insert:
-    beats_endpoint:
-      ips:
-        delete:
-        insert:
-    beats_endpoint_ssl:
-      ips:
-        delete:
-        insert:
-    elasticsearch_rest:
-      ips:
-        delete:
-        insert:
-    elastic_agent_endpoint:
-      ips:
-        delete:
-        insert:
-    endgame:
-      ips:
-        delete:
-        insert:
-    fleet:
-      ips:
-        delete:
-        insert:
-    heavy_node:
-      ips:
-        delete:
-        insert:
-    idh:
-      ips:
-        delete:
-        insert:
-    manager:
-      ips:
-        delete:
-        insert:
-    node:
-      ips:
-        delete:
-        insert:
-    receiver:
-      ips:
-        delete:
-        insert:
-    search_node:
-      ips:
-        delete:
-        insert:
-    sensor:
-      ips:
-        delete:
-        insert:
-    strelka_frontend:
-      ips:
-        delete:
-        insert:
-    syslog:
-      ips:
-        delete:
-        insert:
@@ -45,12 +45,10 @@ echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
 echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
 if [ $TYPE == 'sensorstab' ]; then
   echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
-  salt-call state.apply grafana queue=True
 fi
 if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
   echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
   if [ ! $10 ]; then
-    salt-call state.apply grafana queue=True
     salt-call state.apply utility queue=True
   fi
 fi
@@ -2,7 +2,7 @@
 {% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
 {% for minionid, ip in salt.saltutil.runner(
     'mine.get',
-    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-node or G@role:so-heavynode or G@role:so-receiver or G@role:so-helix ',
+    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-helix ',
     fun='network.ip_addrs',
     tgt_type='compound') | dictsort()
 %}
@@ -10,6 +10,7 @@ base:
     - sensoroni.adv_sensoroni
     - telegraf.soc_telegraf
     - telegraf.adv_telegraf
+    - influxdb.token
     - node_data.ips
 
   '* and not *_eval and not *_import':
@@ -18,6 +19,8 @@ base:
   '*_eval or *_heavynode or *_sensor or *_standalone or *_import':
     - match: compound
     - zeek
+    - bpf.soc_bpf
+    - bpf.adv_bpf
 
   '*_managersearch or *_heavynode':
     - match: compound
@@ -27,6 +30,8 @@ base:
     - logstash.soc_logstash
     - logstash.adv_logstash
     - elasticsearch.index_templates
+    - elasticsearch.soc_elasticsearch
+    - elasticsearch.adv_elasticsearch
 
   '*_manager':
     - logstash
@@ -48,6 +53,8 @@ base:
     - adv_global
     - manager.soc_manager
     - manager.adv_manager
+    - idstools.soc_idstools
+    - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
     - kratos.soc_kratos
@@ -60,6 +67,8 @@ base:
     - elasticsearch.adv_elasticsearch
     - backup.soc_backup
     - backup.adv_backup
+    - firewall.soc_firewall
+    - firewall.adv_firewall
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
 
@@ -85,6 +94,9 @@ base:
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - manager.soc_manager
+    - manager.adv_manager
+    - idstools.soc_idstools
+    - idstools.adv_idstools
     - soc.soc_soc
     - kratos.soc_kratos
     - kratos.adv_kratos
@@ -94,6 +106,8 @@ base:
     - influxdb.adv_influxdb
     - backup.soc_backup
     - backup.adv_backup
+    - firewall.soc_firewall
+    - firewall.adv_firewall
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
 
@@ -113,6 +127,8 @@ base:
     - secrets
     - healthcheck.standalone
     - soc_global
+    - idstools.soc_idstools
+    - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
     - redis.soc_redis
@@ -122,9 +138,12 @@ base:
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - manager.soc_manager
+    - manager.adv_manager
     - soc.soc_soc
     - backup.soc_backup
     - backup.adv_backup
+    - firewall.soc_firewall
+    - firewall.adv_firewall
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
 
@@ -138,6 +157,8 @@ base:
   '*_idh':
     - soc_global
     - adv_global
+    - idh.soc_idh
+    - idh.adv_idh
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
 
@@ -147,6 +168,8 @@ base:
     - logstash.soc_logstash
     - logstash.adv_logstash
     - elasticsearch.index_templates
+    - elasticsearch.soc_elasticsearch
+    - elasticsearch.adv_elasticsearch
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
@@ -155,7 +178,6 @@ base:
     - adv_global
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
-    - data.nodestab
 
   '*_receiver':
     - logstash
@@ -185,6 +207,7 @@ base:
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - manager.soc_manager
+    - manager.adv_manager
     - soc.soc_soc
     - soc_global
     - adv_global
@@ -196,6 +219,8 @@ base:
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - firewall.soc_firewall
+    - firewall.adv_firewall
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
 
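With the reorganized top file, a minion's effective pillar can be spot-checked from the manager to confirm the newly included files actually render (illustrative; <minion_id> is a placeholder):

    # Render one minion's pillar and look for keys added above
    salt '<minion_id>' pillar.get idstools
    salt '<minion_id>' pillar.get firewall
    salt '<minion_id>' pillar.get bpf
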
@@ -10,7 +10,7 @@ def check():
     if path.exists('/var/run/reboot-required'):
         retval = 'True'
 
-    elif os == 'CentOS':
+    elif os == 'Rocky':
         cmd = 'needs-restarting -r > /dev/null 2>&1'
 
     try:
@@ -5,6 +5,8 @@ import logging
 def status():
     return __salt__['cmd.run']('/usr/sbin/so-status')
 
+def version():
+    return __salt__['cp.get_file_str']('/etc/soversion')
 
 def mysql_conn(retry):
     log = logging.getLogger(__name__)
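Once synced, the new function is callable like any other execution module function; a sketch, assuming the module is registered under the name so (the module name is not visible in this diff):

    salt-call saltutil.sync_modules   # push custom modules to the minion
    salt-call so.version              # hypothetical name; returns the contents of /etc/soversion
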
@@ -8,7 +8,6 @@
 {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
 {% set ELASTALERT = salt['pillar.get']('elastalert:enabled', True) %}
 {% set ELASTICSEARCH = salt['pillar.get']('elasticsearch:enabled', True) %}
-{% set FILEBEAT = salt['pillar.get']('filebeat:enabled', True) %}
 {% set KIBANA = salt['pillar.get']('kibana:enabled', True) %}
 {% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %}
 {% set CURATOR = salt['pillar.get']('curator:enabled', True) %}
@@ -33,10 +32,9 @@
       'nginx',
       'telegraf',
       'influxdb',
-      'grafana',
       'soc',
       'kratos',
-      'elastic-fleet',
+      'elasticfleet',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -82,7 +80,6 @@
       'ssl',
       'telegraf',
       'firewall',
-      'filebeat',
       'idh',
       'schedule',
       'docker_clean'
@@ -108,7 +105,7 @@
       'schedule',
       'tcpreplay',
       'docker_clean',
-      'elastic-fleet'
+      'elasticfleet'
     ],
     'so-manager': [
       'salt.master',
@@ -119,10 +116,9 @@
       'nginx',
       'telegraf',
       'influxdb',
-      'grafana',
       'soc',
       'kratos',
-      'elastic-fleet',
+      'elasticfleet',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -139,10 +135,9 @@
       'nginx',
       'telegraf',
       'influxdb',
-      'grafana',
       'soc',
       'kratos',
-      'elastic-fleet',
+      'elasticfleet',
       'firewall',
       'manager',
       'idstools',
@@ -169,10 +164,9 @@
       'nginx',
       'telegraf',
       'influxdb',
-      'grafana',
       'soc',
       'kratos',
-      'elastic-fleet',
+      'elasticfleet',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -193,7 +187,6 @@
       'pcap',
       'suricata',
       'healthcheck',
-      'filebeat',
       'schedule',
       'tcpreplay',
       'docker_clean'
@@ -219,10 +212,6 @@
     ],
   }, grain='role') %}
 
-{% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-receiver'] %}
-  {% do allowed_states.append('filebeat') %}
-{% endif %}
-
 {% if (PLAYBOOK != 0) and grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
   {% do allowed_states.append('mysql') %}
 {% endif %}
@@ -272,13 +261,6 @@
 {% do allowed_states.append('redis') %}
 {% endif %}
 
-{% if grains.os == 'CentOS' %}
-  {% if not ISAIRGAP %}
-    {% do allowed_states.append('yum') %}
-  {% endif %}
-  {% do allowed_states.append('yum.packages') %}
-{% endif %}
-
 {# all nodes on the right salt version can run the following states #}
 {% do allowed_states.append('common') %}
 {% do allowed_states.append('patch.os.schedule') %}
@@ -19,6 +19,7 @@ config_backup_script:
     - source: salt://backup/tools/sbin/so-config-backup.jinja
     - defaults:
         BACKUPLOCATIONS: {{ BACKUP_MERGED.locations }}
+        DESTINATION: {{ BACKUP_MERGED.destination }}
 
 # Add config backup
 so_config_backup:
@@ -3,4 +3,5 @@ backup:
     - /opt/so/saltstack/local
     - /etc/pki
     - /etc/salt
-    - /opt/so/conf/kratos
+    - /nsm/kratos
+  destination: "/nsm/backup"
salt/backup/soc_backup.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
+backup:
+  locations:
+    description: List of locations to back up to the destination.
+    helpLink: backup.html
+    global: True
+  destination:
+    description: Directory to store the configuration backups in.
+    helpLink: backup.html
+    global: True
+
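An operator override of the new destination setting would go in the local backup pillar; a minimal sketch (the exact .sls path is an assumption):

    # Hypothetical local pillar override path
    cat <<'EOF' > /opt/so/saltstack/local/pillar/backup/soc_backup.sls
    backup:
      destination: /nsm/backup
    EOF
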
@@ -8,7 +8,8 @@
 . /usr/sbin/so-common
 
 TODAY=$(date '+%Y_%m_%d')
-BACKUPFILE="/nsm/backup/so-config-backup-$TODAY.tar"
+BACKUPDIR={{ DESTINATION }}
+BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
 MAXBACKUPS=7
 
 # Create backup dir if it does not exist
@@ -1,4 +1,4 @@
 bpf:
   pcap: []
   suricata: []
   zeek: []
salt/bpf/pcap.map.jinja (new file, 4 lines)
@@ -0,0 +1,4 @@
+{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
+{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+
+{% set PCAPBPF = BPFMERGED.pcap %}
@@ -1,10 +1,16 @@
 bpf:
   pcap:
     description: List of BPF filters to apply to PCAP.
+    multiline: True
+    forcedType: "[]string"
     helpLink: bpf.html
   suricata:
     description: List of BPF filters to apply to Suricata.
+    multiline: True
+    forcedType: "[]string"
     helpLink: bpf.html
   zeek:
     description: List of BPF filters to apply to Zeek.
+    multiline: True
+    forcedType: "[]string"
     helpLink: bpf.html
salt/bpf/suricata.map.jinja (new file, 4 lines)
@@ -0,0 +1,4 @@
+{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
+{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+
+{% set SURICATABPF = BPFMERGED.suricata %}
salt/bpf/zeek.map.jinja (new file, 4 lines)
@@ -0,0 +1,4 @@
+{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
+{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+
+{% set ZEEKBPF = BPFMERGED.zeek %}
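All three new map files perform the same pillar-over-defaults merge, so the effective filter lists can be inspected directly on a node (these pillar keys come straight from bpf/defaults.yaml):

    salt-call pillar.get bpf:pcap
    salt-call pillar.get bpf:suricata
    salt-call pillar.get bpf:zeek
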
@@ -235,7 +235,7 @@ soversionfile:
 {% endif %}
 
 {% if GLOBALS.so_model %}
-{% if GLOBALS.os == 'CentOS' %}
+{% if GLOBALS.os == 'Rocky' %}
 # Install Raid tools
 raidpkgs:
   pkg.installed:
@@ -42,48 +42,15 @@ commonpkgs:
       - mariadb-devel
       - python3-dnf-plugin-versionlock
       - nmap-ncat
-      - createrepo
-      - python3-lxml
-      - python3-packaging
-      - python3-watchdog
       - yum-utils
       - device-mapper-persistent-data
      - lvm2
       - openssl
       - git
-      - vim-enhanced
       - python3-docker
-{% else %}
-commonpkgs:
-  pkg.installed:
-    - skip_suggestions: True
-    - pkgs:
-      - wget
-      - ntpdate
-      - bind-utils
-      - jq
-      - tcpdump
-      - httpd-tools
-      - net-tools
-      - curl
-      - sqlite
-      - mariadb-devel
-      - nmap-ncat
-      - python3
-      - python36-packaging
-      - python36-lxml
-      - python36-docker
-      - python36-dateutil
-      - python36-m2crypto
-      - python36-mysql
-      - python36-packaging
-      - python36-lxml
-      - securityonion-python36-watchdog
-      - yum-utils
-      - device-mapper-persistent-data
-      - lvm2
-      - openssl
-      - git
-      - vim-enhanced
-      - yum-plugin-versionlock
+      - python3-m2crypto
+      - rsync
+      - python3-rich
+      - python3-watchdog
+      - unzip
 {% endif %}
@@ -6,8 +6,8 @@
 # Elastic License 2.0.
 
 
-{# we only want the script to install the workstation if it is CentOS -#}
-{% if grains.os == 'CentOS' -%}
+{# we only want the script to install the workstation if it is Rocky -#}
+{% if grains.os == 'Rocky' -%}
 {# if this is a manager -#}
 {% if grains.master == grains.id.split('_')|first -%}
 
@@ -80,12 +80,12 @@ echo "Since this is not a manager, the pillar values to enable analyst workstati
 {#- endif if this is a manager #}
 {% endif -%}
 
-{#- if not CentOS #}
+{#- if not Rocky #}
 {%- else %}
 
-echo "The Analyst Workstation can only be installed on CentOS. Please view the documentation at $doc_workstation_url."
+echo "The Analyst Workstation can only be installed on Rocky. Please view the documentation at $doc_workstation_url."
 
-{#- endif grains.os == CentOS #}
+{#- endif grains.os == Rocky #}
 {% endif -%}
 
 exit 0
@@ -8,10 +8,17 @@
 DEFAULT_SALT_DIR=/opt/so/saltstack/default
 DOC_BASE_URL="https://docs.securityonion.net/en/2.4"
 
-# Check for prerequisites
-if [ "$(id -u)" -ne 0 ]; then
-  echo "This script must be run using sudo!"
-  exit 1
+if [ -z $NOROOT ]; then
+  # Check for prerequisites
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "This script must be run using sudo!"
+    exit 1
+  fi
+fi
+
+# Ensure /usr/sbin is in path
+if ! echo "$PATH" | grep -q "/usr/sbin"; then
+  export PATH="$PATH:/usr/sbin"
 fi
 
 # Define a banner to separate sections
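Because the root check is now wrapped in `if [ -z $NOROOT ]`, any non-empty NOROOT skips it when sourcing the library; a usage sketch:

    . /usr/sbin/so-common            # still exits unless run via sudo
    NOROOT=1 . /usr/sbin/so-common   # loads helper functions without requiring root
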
@@ -47,33 +54,37 @@ add_interface_bond0() {
       ethtool -K "$BNIC" $i off &>/dev/null
     fi
   done
-  # Check if the bond slave connection has already been created
-  nmcli -f name,uuid -p con | grep -q "bond0-slave-$BNIC"
-  local found_int=$?
 
-  if [[ $found_int != 0 ]]; then
-    # Create the slave interface and assign it to the bond
-    nmcli con add type ethernet ifname "$BNIC" con-name "bond0-slave-$BNIC" master bond0 -- \
-      ethernet.mtu "$MTU" \
-      connection.autoconnect "yes"
-  else
-    local int_uuid
-    int_uuid=$(nmcli -f name,uuid -p con | sed -n "s/bond0-slave-$BNIC //p" | tr -d ' ')
-
-    nmcli con mod "$int_uuid" \
-      ethernet.mtu "$MTU" \
-      connection.autoconnect "yes"
+  if ! [[ $is_cloud ]]; then
+    # Check if the bond slave connection has already been created
+    nmcli -f name,uuid -p con | grep -q "bond0-slave-$BNIC"
+    local found_int=$?
+
+    if [[ $found_int != 0 ]]; then
+      # Create the slave interface and assign it to the bond
+      nmcli con add type ethernet ifname "$BNIC" con-name "bond0-slave-$BNIC" master bond0 -- \
+        ethernet.mtu "$MTU" \
+        connection.autoconnect "yes"
+    else
+      local int_uuid
+      int_uuid=$(nmcli -f name,uuid -p con | sed -n "s/bond0-slave-$BNIC //p" | tr -d ' ')
+
+      nmcli con mod "$int_uuid" \
+        ethernet.mtu "$MTU" \
+        connection.autoconnect "yes"
+    fi
   fi
 
   ip link set dev "$BNIC" arp off multicast off allmulticast off promisc on
 
-  # Bring the slave interface up
-  if [[ $verbose == true ]]; then
-    nmcli con up "bond0-slave-$BNIC"
-  else
-    nmcli con up "bond0-slave-$BNIC" &>/dev/null
+  if ! [[ $is_cloud ]]; then
+    # Bring the slave interface up
+    if [[ $verbose == true ]]; then
+      nmcli con up "bond0-slave-$BNIC"
+    else
+      nmcli con up "bond0-slave-$BNIC" &>/dev/null
+    fi
   fi
 
   if [ "$nic_error" != 0 ]; then
     return "$nic_error"
   fi
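On a non-cloud node, the result of the reworked bonding logic can be verified with standard tooling (illustrative):

    cat /proc/net/bonding/bond0                       # confirm the slave NIC joined the bond
    nmcli -f NAME,UUID,DEVICE con show | grep bond0   # confirm the managed connections exist
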
@@ -187,14 +198,14 @@ get_random_value() {
 }
 
 gpg_rpm_import() {
-  if [[ "$OS" == "centos" ]]; then
+  if [[ "$OS" == "rocky" ]]; then
     if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
-      local RPMKEYSLOC="../salt/repo/client/files/centos/keys"
+      local RPMKEYSLOC="../salt/repo/client/files/rocky/keys"
     else
-      local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/centos/keys"
+      local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/rocky/keys"
     fi
 
-    RPMKEYS=('RPM-GPG-KEY-EPEL-7' 'docker.pub' 'SALTSTACK-GPG-KEY.pub' 'securityonion.pub')
+    RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALTSTACK-GPG-KEY2.pub' 'docker.pub' 'securityonion.pub')
 
     for RPMKEY in "${RPMKEYS[@]}"; do
       rpm --import $RPMKEYSLOC/$RPMKEY
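Whether the Rocky key set was actually imported can be confirmed with rpm itself (a sketch):

    # List imported GPG public keys with their summaries
    rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n'
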
@@ -366,17 +377,23 @@ run_check_net_err() {
   fi
 }
 
+salt_minion_count() {
+  local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
+  MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l)
+
+}
+
 set_cron_service_name() {
-  if [[ "$OS" == "centos" ]]; then
+  if [[ "$OS" == "rocky" ]]; then
     cron_service_name="crond"
   else
     cron_service_name="cron"
   fi
 }
 
 set_os() {
   if [ -f /etc/redhat-release ]; then
-    OS=centos
+    OS=rocky
   else
     OS=ubuntu
   fi
|||||||
@@ -24,11 +24,11 @@ mkdir -p /tmp/elastic-agent-workspace
|
|||||||
for OS in "${CONTAINERGOOS[@]}"
|
for OS in "${CONTAINERGOOS[@]}"
|
||||||
do
|
do
|
||||||
printf "\n\nGenerating $OS Installer..."
|
printf "\n\nGenerating $OS Installer..."
|
||||||
cp /opt/so/saltstack/default/salt/elastic-fleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
|
cp /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
|
||||||
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
|
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
|
||||||
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
|
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
|
||||||
--mount type=bind,source=/tmp/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
|
--mount type=bind,source=/tmp/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
|
||||||
--mount type=bind,source=/opt/so/saltstack/local/salt/elastic-fleet/files/so_agent-installers/,target=/output/ \
|
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ \
|
||||||
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
|
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
|
||||||
printf "\n $OS Installer Generated..."
|
printf "\n $OS Installer Generated..."
|
||||||
done
|
done
|
||||||
|
|||||||
@@ -95,8 +95,6 @@ function soUserSync() {
   $(dirname $0)/so-user sync
   printf "\nApplying logstash state to the appropriate nodes.\n\n"
   salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply logstash queue=True
-  printf "\nApplying filebeat state to the appropriate nodes.\n\n"
-  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode or G@role:so-sensor or G@role:so-fleet' state.apply filebeat queue=True
   printf "\nApplying kibana state to the appropriate nodes.\n\n"
   salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch' state.apply kibana queue=True
   printf "\nApplying curator state to the appropriate nodes.\n\n"
@@ -54,17 +54,10 @@ if [ $SKIP -ne 1 ]; then
   if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
 fi
 
-# Check to see if Logstash/Filebeat are running
+# Check to see if Logstash are running
 LS_ENABLED=$(so-status | grep logstash)
-FB_ENABLED=$(so-status | grep filebeat)
 EA_ENABLED=$(so-status | grep elastalert)
 
-if [ ! -z "$FB_ENABLED" ]; then
-
-  /usr/sbin/so-filebeat-stop
-
-fi
-
 if [ ! -z "$LS_ENABLED" ]; then
 
   /usr/sbin/so-logstash-stop
@@ -86,13 +79,7 @@ do
   curl -K /opt/so/conf/elasticsearch/curl.config-XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
 done
 
-#Start Logstash/Filebeat
-if [ ! -z "$FB_ENABLED" ]; then
-
-  /usr/sbin/so-filebeat-start
-
-fi
-
+#Start Logstash
 if [ ! -z "$LS_ENABLED" ]; then
 
   /usr/sbin/so-logstash-start
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
salt/common/tools/sbin/so-elastic-fleet-agent-policy-view (Normal file → Executable file, 2 lines changed)
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
salt/common/tools/sbin/so-elastic-fleet-integration-policy-bulk-delete (Normal file → Executable file, 2 lines changed)
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
@@ -1,4 +1,4 @@
-#/bin/bash
+#!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
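The repeated `#/bin/bash` → `#!/bin/bash` fixes matter because without the `!` the first line is only a comment: execve() rejects the file and the invoking shell falls back to interpreting it itself. A quick demonstration (sketch):

    printf '#/bin/bash\necho "interpreted by: $0"\n' > /tmp/demo && chmod +x /tmp/demo
    /tmp/demo   # runs, but under the caller's shell, not necessarily bash
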
@@ -11,40 +11,51 @@
 {%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) %}
 
 wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
-## This hackery will be removed if using Elastic Auth ##
 
 # Let's snag a cookie from Kibana
 SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
 
 # Disable certain Features from showing up in the Kibana UI
 echo
-echo "Setting up default Security Onion package policies for Elastic Agent..."
-# Set up Suricata logs
+echo "Disable certain Features from showing up in the Kibana UI"
+so-kibana-space-defaults
+echo
+
+# Suricata logs
 echo
 echo "Setting up Suricata package policy..."
 curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "suricata-logs", "name": "suricata-logs", "description": "Suricata integration", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/suricata/eve*.json" ], "data_stream.dataset": "suricata", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata", "custom": "pipeline: suricata.common" }}}}}}'
 echo
 
-# Set up Zeek logs
+# Zeek logs
 echo
 echo "Setting up Zeek package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "zeek-logs", "description": "Zeek logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/zeek/logs/current/*.log"], "data_stream.dataset": "zeek", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "prospector.scanner.exclude_files: [\"(broker | capture_loss | loaded_scripts | packet_filter | stats | stderr | stdout).log$\"]\n" } } } } } }'
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "zeek-logs", "description": "Zeek logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/zeek/logs/current/*.log"], "data_stream.dataset": "zeek", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
 echo
 
-# Import - Suricata
+# Import - EVTX
 echo
-echo "Settings up Suricata import package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "import-suricata-logs", "name": "import-suricata-logs", "description": "Import Suricata logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/suricata/eve*.json"], "data_stream.dataset": "import", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"", "custom": "pipeline: suricata.common" } } } } } }'
+echo "Setting up EVTX import package policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "package": { "name": "log", "version": "1.1.0" }, "name": "import-evtx-logs", "namespace": "so", "description": "Import Windows EVTX logs", "policy_id": "so-grid-nodes", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/evtx/data.json" ], "data_stream.dataset": "import", "custom": "pipeline: import.wel", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: windows_eventlog\n imported: true", "tags": [] } } } } } }'
 echo
 
-# Set Import - Zeek logs
+# Import - Suricata logs
 echo
-echo "Setting up Zeek Import package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "import-zeek-logs", "description": "Zeek Import logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/zeek/logs/*.log"], "data_stream.dataset": "import", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"" } } } } } }'
+echo "Setting up Suricata import package policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "import-suricata-logs", "name": "import-suricata-logs", "description": "Import Suricata logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/suricata/eve*.json"], "data_stream.dataset": "import", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"", "custom": "pipeline: suricata.common" } } } } } }'
+echo
+
+# Import - Zeek logs
+echo
+echo "Setting up Zeek import package policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "import-zeek-logs", "description": "Zeek Import logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/zeek/logs/*.log"], "data_stream.dataset": "import", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
+echo
 
 # Strelka logs
-echo "Setting up Strelka package policy"
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "strelka-logs", "name": "strelka-logs", "description": "Strelka logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/strelka/log/strelka.log" ], "data_stream.dataset": "file", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka", "custom": "pipeline: strelka.file" }}}}}}'
+echo
+echo "Setting up Strelka package policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "strelka-logs", "name": "strelka-logs", "description": "Strelka logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/strelka/log/strelka.log" ], "data_stream.dataset": "strelka", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka", "custom": "pipeline: strelka.file" }}}}}}'
 echo
 
 # Syslog TCP Port 514
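Each of these curl calls targets the Kibana Fleet package-policies API, so the created policies can be listed back for verification (a sketch reusing the script's own curl config and session cookie):

    curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" \
      -L -X GET "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true'
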
@@ -62,7 +73,7 @@ echo
 # Kratos logs
 echo
 echo "Setting up Kratos package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "kratos-logs", "name": "kratos-logs", "description": "Kratos logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kratos/kratos.log" ], "data_stream.dataset": "kratos", "tags": [],"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- rename:\n fields:\n - from: \"audience\"\n to: \"event.dataset\"\n ignore_missing: true\n- add_fields:\n when:\n not: \n has_fields: ['event.dataset']\n target: ''\n fields:\n event.dataset: access", "custom": "pipeline: kratos" }}}}}}'
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "kratos-logs", "name": "kratos-logs", "description": "Kratos logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kratos/kratos.log" ], "data_stream.dataset": "kratos", "tags": [],"custom":"pipeline: kratos","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos" }}}}}}'
 echo
 
 # RITA Logs
@@ -73,24 +84,54 @@ echo
 
 # Elasticsearch logs
 echo
-echo "Seting up Elasticsearch package policy..."
+echo "Setting up Elasticsearch package policy..."
 curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "elasticsearch", "version": "1.0.0" }, "id": "elasticsearch-logs", "name": "elasticsearch-logs", "description": "Elasticsearch Logs", "namespace": "default", "inputs": { "elasticsearch-logfile": { "enabled": true, "streams": { "elasticsearch.audit": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_audit.json" ] } }, "elasticsearch.deprecation": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_deprecation.json" ] } }, "elasticsearch.gc": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/gc.log.[0-9]*", "/var/log/elasticsearch/gc.log" ] } }, "elasticsearch.server": { "enabled": true, "vars": { "paths": [ "/opt/so/log/elasticsearch/*.log" ] } }, "elasticsearch.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_index_search_slowlog.json", "/var/log/elasticsearch/*_index_indexing_slowlog.json" ] } } } }, "elasticsearch-elasticsearch/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9200" ], "scope": "node" }, "streams": { "elasticsearch.stack_monitoring.ccr": { "enabled": false }, "elasticsearch.stack_monitoring.cluster_stats": { "enabled": false }, "elasticsearch.stack_monitoring.enrich": { "enabled": false }, "elasticsearch.stack_monitoring.index": { "enabled": false }, "elasticsearch.stack_monitoring.index_recovery": { "enabled": false, "vars": { "active.only": true } }, "elasticsearch.stack_monitoring.index_summary": { "enabled": false }, "elasticsearch.stack_monitoring.ml_job": { "enabled": false }, "elasticsearch.stack_monitoring.node": { "enabled": false }, "elasticsearch.stack_monitoring.node_stats": { "enabled": false }, "elasticsearch.stack_monitoring.pending_tasks": { "enabled": false }, "elasticsearch.stack_monitoring.shard": { "enabled": false } } } } }'
 echo
 
 # Logstash logs
-echo
-echo "Setting up Logstash package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "logstash", "version": "2.0.0" }, "id": "logstash-logs", "name": "logstash-logs", "description": "Logstash logs", "namespace": "default", "inputs": { "logstash-logfile": { "enabled": true, "streams": { "logstash.log": { "enabled": true, "vars": { "paths": [ "/opt/so/logs/logstash/logstash.log" ] } }, "logstash.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/logstash/logstash-slowlog-plain*.log", "/var/log/logstash/logstash-slowlog-json*.log" ] } } } }, "logstash-logstash/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9600" ], "period": "10s" }, "streams": { "logstash.stack_monitoring.node": { "enabled": false }, "logstash.stack_monitoring.node_stats": { "enabled": false } } } } }'
-echo
+#echo
+#echo "Setting up Logstash package policy..."
+#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "logstash", "version": "2.0.0" }, "id": "logstash-logs", "name": "logstash-logs", "description": "Logstash logs", "namespace": "default", "inputs": { "logstash-logfile": { "enabled": true, "streams": { "logstash.log": { "enabled": true, "vars": { "paths": [ "/opt/so/logs/logstash/logstash.log" ] } }, "logstash.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/logstash/logstash-slowlog-plain*.log", "/var/log/logstash/logstash-slowlog-json*.log" ] } } } }, "logstash-logstash/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9600" ], "period": "10s" }, "streams": { "logstash.stack_monitoring.node": { "enabled": false }, "logstash.stack_monitoring.node_stats": { "enabled": false } } } } }'
+#echo
 
 # Kibana logs
-echo
-echo "Setting up Kibana package policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "kibana", "version": "2.0.0" }, "id": "kibana-logs", "name": "kibana-logs", "description": "Kibana logs", "namespace": "default", "inputs": { "kibana-logfile": { "enabled": true, "streams": { "kibana.audit": { "enabled": false, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } }, "kibana.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } } } }, "kibana-kibana/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:5601" ] }, "streams": { "kibana.stack_monitoring.cluster_actions": { "enabled": false }, "kibana.stack_monitoring.cluster_rules": { "enabled": false }, "kibana.stack_monitoring.node_actions": { "enabled": false }, "kibana.stack_monitoring.node_rules": { "enabled": false }, "kibana.stack_monitoring.stats": { "enabled": false }, "kibana.stack_monitoring.status": { "enabled": false } } } } }'
+#echo
+#echo "Setting up Kibana package policy..."
+#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "kibana", "version": "2.0.0" }, "id": "kibana-logs", "name": "kibana-logs", "description": "Kibana logs", "namespace": "default", "inputs": { "kibana-logfile": { "enabled": true, "streams": { "kibana.audit": { "enabled": false, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } }, "kibana.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } } } }, "kibana-kibana/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:5601" ] }, "streams": { "kibana.stack_monitoring.cluster_actions": { "enabled": false }, "kibana.stack_monitoring.cluster_rules": { "enabled": false }, "kibana.stack_monitoring.node_actions": { "enabled": false }, "kibana.stack_monitoring.node_rules": { "enabled": false }, "kibana.stack_monitoring.stats": { "enabled": false }, "kibana.stack_monitoring.status": { "enabled": false } } } } }'
|
||||||
echo
|
#echo
|
||||||
|
|
||||||
# Redis logs
|
# Redis logs
|
||||||
echo
|
echo
|
||||||
echo "Setting up Redis package policy..."
|
echo "Setting up Redis package policy..."
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "redis", "version": "1.4.0" }, "id": "redis-logs", "name": "redis-logs", "description": "Redis logs", "namespace": "default", "inputs": { "redis-logfile": { "enabled": true, "streams": { "redis.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/redis/redis.log" ], "tags": [ "redis-log" ], "preserve_original_event": false } } } }, "redis-redis": { "enabled": false, "streams": { "redis.slowlog": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "password": "" } } } }, "redis-redis/metrics": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "idle_timeout": "20s", "maxconn": 10, "network": "tcp", "password": "" }, "streams": { "redis.info": { "enabled": false, "vars": { "period": "10s" } }, "redis.key": { "enabled": false, "vars": { "key.patterns": "- limit: 20\n pattern: '*'\n", "period": "10s" } }, "redis.keyspace": { "enabled": false, "vars": { "period": "10s" } } } } } }'
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "redis", "version": "1.4.0" }, "id": "redis-logs", "name": "redis-logs", "description": "Redis logs", "namespace": "default", "inputs": { "redis-logfile": { "enabled": true, "streams": { "redis.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/redis/redis.log" ], "tags": [ "redis-log" ], "preserve_original_event": false } } } }, "redis-redis": { "enabled": false, "streams": { "redis.slowlog": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "password": "" } } } }, "redis-redis/metrics": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "idle_timeout": "20s", "maxconn": 10, "network": "tcp", "password": "" }, "streams": { "redis.info": { "enabled": false, "vars": { "period": "10s" } }, "redis.key": { "enabled": false, "vars": { "key.patterns": "- limit: 20\n pattern: '*'\n", "period": "10s" } }, "redis.keyspace": { "enabled": false, "vars": { "period": "10s" } } } } } }'
|
||||||
echo
|
echo
|
||||||
|
|
||||||
|
# IDH logs
|
||||||
|
echo
|
||||||
|
echo "Setting up IDH package policy..."
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"policy_id":"so-grid-nodes","package":{"name":"log","version":"1.1.1"},"id":"idh-logs","name":"idh-logs","namespace":"so","description":"IDH integration","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/nsm/idh/opencanary.log"],"data_stream.dataset":"idh","custom":"pipeline: common","processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '\''[\"prospector\", \"input\", \"offset\", \"beat\"]'\''\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary","tags":[]}}}}}}'
|
||||||
|
echo
|
||||||
|
|
||||||
|
# SOC - Server logs
|
||||||
|
echo
|
||||||
|
echo "Setting up SOC - Server Logs package policy..."
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-server-logs","namespace":"so","description":"Security Onion Console Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sensoroni-server.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
|
||||||
|
echo
|
||||||
|
|
||||||
|
# SOC - Sensoroni logs
|
||||||
|
echo
|
||||||
|
echo "Setting up SOC - Sensoroni Logs package policy..."
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-sensoroni-logs","namespace":"so","description":"Security Onion - Sensoroni - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/sensoroni/sensoroni.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
|
||||||
|
echo
|
||||||
|
|
||||||
|
# SOC - Elastic Auth Sync logs
|
||||||
|
echo
|
||||||
|
echo "Setting up SOC - Elastic Auth Sync Logs package policy..."
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-auth-sync-logs","namespace":"so","description":"Security Onion - Elastic Auth Sync - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sync.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync","tags":[]}}}}}}'
|
||||||
|
echo
|
||||||
|
|
||||||
|
# SOC - Salt Relay logs
|
||||||
|
echo
|
||||||
|
echo "Setting up SOC - Salt_Relay Logs package policy..."
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-salt-relay-logs","namespace":"so","description":"Security Onion - Salt Relay - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/salt-relay.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay","tags":[]}}}}}}'
|
||||||
|
echo
|
||||||
|
|||||||
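As a quick sanity check, the package policies created above can be listed back out of Fleet; this is a minimal sketch that reuses the script's own curl config and session cookie rather than introducing anything new:

curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L "localhost:5601/api/fleet/package_policies" | jq -r '.items[].id'
# Expect ids such as elasticsearch-logs, redis-logs, and idh-logs once the POSTs above succeed.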
@@ -24,14 +24,16 @@ mkdir -p /opt/so/conf/elastic-fleet/certs
 cp /etc/ssl/certs/intca.crt /opt/so/conf/elastic-fleet/certs
 cp /etc/pki/elasticfleet* /opt/so/conf/elastic-fleet/certs

-# Add Local Elasticsearch Ouput for Fleet Server
+{% if grains.role in ['so-import', 'so-eval'] %}
+# Add SO-Manager Elasticsearch Ouput
 ESCACRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
 JSON_STRING=$( jq -n \
 --arg ESCACRT "$ESCACRT" \
-'{"name":"so-manager_elasticsearch2","id":"so-manager_elasticsearch2","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
+'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
 curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 printf "\n\n"

+{% else %}
 # Create Logstash Output payload
 LOGSTASHCRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/elasticfleet.crt)
 LOGSTASHKEY=$(openssl rsa -in /opt/so/conf/elastic-fleet/certs/elasticfleet.key)
@@ -42,6 +44,7 @@ JSON_STRING=$( jq -n \
 --arg LOGSTASHCA "$LOGSTASHCA" \
 '{"name":"so-manager_logstash","id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}'
 )
+{%- endif %}

 # Add SO-Manager Logstash Ouput
 curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
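The outputs registered here can be verified the same way; a hedged sketch against the same Fleet outputs endpoint used above:

curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs" | jq -r '.items[] | "\(.id) \(.type)"'
# Should show so-manager_elasticsearch (import/eval branch) or so-manager_logstash (all other roles).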
@@ -81,21 +84,26 @@ printf '%s\n'\
 " url: '{{ GLOBALS.manager_ip }}'"\
 "" >> "$pillar_file"

+#Store Grid Nodes Enrollment token in Global pillar
+global_pillar_file=/opt/so/saltstack/local/pillar/soc_global.sls
+printf '%s\n'\
+"  fleet_grid_enrollment_token: '$GRIDNODESENROLLMENTOKEN'"\
+"" >> "$global_pillar_file"

 # Call Elastic-Fleet Salt State
-salt-call state.apply elastic-fleet queue=True
+salt-call state.apply elasticfleet queue=True

 # Load Elastic Fleet integrations
 /usr/sbin/so-elastic-fleet-integration-policy-load

 # Temp
-wget -P /opt/so/saltstack/default/salt/elastic-fleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.4.1/so-elastic-agent-8.4.1-darwin-x86_64.tar.gz
-wget -P /opt/so/saltstack/default/salt/elastic-fleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.4.1/so-elastic-agent-8.4.1-linux-x86_64.tar.gz
-wget -P /opt/so/saltstack/default/salt/elastic-fleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.4.1/so-elastic-agent-8.4.1-windows-x86_64.tar.gz
+wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.7.0/so-elastic-agent-8.7.0-darwin-x86_64.tar.gz
+wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.7.0/so-elastic-agent-8.7.0-linux-x86_64.tar.gz
+wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.7.0/so-elastic-agent-8.7.0-windows-x86_64.tar.gz

 #git clone -b 2.4-so-elastic-agent https://github.com/Security-Onion-Solutions/securityonion-image.git
 #cd securityonion-image/so-elastic-agent-builder
 #docker build -t so-elastic-agent-builder .

 so-elastic-agent-gen-installers
-salt-call state.apply elastic-fleet.install_agent_grid queue=True
+salt-call state.apply elasticfleet.install_agent_grid queue=True
@@ -22,10 +22,6 @@
 /usr/sbin/so-restart logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-/usr/sbin/so-restart filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
 /usr/sbin/so-restart curator $1
 {%- endif %}
@@ -22,10 +22,6 @@
 /usr/sbin/so-start logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-/usr/sbin/so-start filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
 /usr/sbin/so-start curator $1
 {%- endif %}
@@ -22,10 +22,6 @@
 /usr/sbin/so-stop logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-/usr/sbin/so-stop filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
 /usr/sbin/so-stop curator $1
 {%- endif %}
57 salt/common/tools/sbin/so-elasticsearch-cluster-space-total (Executable file)
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+TOTAL_AVAILABLE_SPACE=0
+
+# Wait for ElasticSearch to initialize
+COUNT=0
+ELASTICSEARCH_CONNECTED="no"
+while [[ "$COUNT" -le 240 ]]; do
+  /usr/sbin/so-elasticsearch-query / -k --output /dev/null --silent --head --fail
+  if [ $? -eq 0 ]; then
+    ELASTICSEARCH_CONNECTED="yes"
+    break
+  else
+    ((COUNT+=1))
+    sleep 1
+  fi
+done
+if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
+  echo
+  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
+  echo
+  exit 1
+fi
+
+# Set percentage of space to desired value, otherwise use a default value of 80 percent
+if [[ "$1" != "" ]]; then
+  PERCENTAGE=$1
+else
+  PERCENTAGE=80
+fi
+
+# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
+{% if GLOBALS.role == 'so-manager' %}
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do
+{% else %}
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
+{% endif %}
+  size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
+  unit=$(echo $i | grep -oE '[A-Za-z]+')
+  if [ $unit = "tb" ]; then
+    size=$(( size * 1024 ))
+  fi
+  TOTAL_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE + size ))
+done
+
+# Calculate the percentage of available space based on our previously defined value
+PERCENTAGE_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE*PERCENTAGE/100 ))
+echo "$PERCENTAGE_AVAILABLE_SPACE"
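A brief usage sketch for the new helper, assuming it has been rendered by Salt and installed under /usr/sbin; per the arithmetic above, terabyte values are multiplied by 1024, so the result is effectively in gigabytes:

sudo so-elasticsearch-cluster-space-total      # 80 percent of the summed space (default)
sudo so-elasticsearch-cluster-space-total 50   # hypothetical: 50 percent instead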
28 salt/common/tools/sbin/so-elasticsearch-cluster-space-used (Executable file)
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+TOTAL_AVAILABLE_SPACE=0
+
+# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
+{% if GLOBALS.role == 'so-manager' %}
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do
+{% else %}
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $3}'); do
+{% endif %}
+  size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
+  unit=$(echo $i | grep -oE '[A-Za-z]+')
+  if [ $unit = "tb" ]; then
+    size=$(( size * 1024 ))
+  fi
+  TOTAL_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE + size ))
+done
+
+# Calculate the percentage of available space based on our previously defined value
+echo "$TOTAL_AVAILABLE_SPACE"
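A hedged sketch of how the two space helpers could be combined; the threshold comparison is illustrative only and is not something either script performs itself (this script sums a different _cat/allocation column, so the two figures are directly comparable in gigabytes):

TOTAL=$(so-elasticsearch-cluster-space-total)   # percentage of the summed space, in GB
USED=$(so-elasticsearch-cluster-space-used)     # space already consumed, in GB
if [ "$USED" -gt "$TOTAL" ]; then echo "Cluster is past the target usage threshold"; fi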
15 salt/common/tools/sbin/so-elasticsearch-ilm-lifecycle-status (Executable file)
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+if [ "$1" == "" ]; then
+  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/_all/_ilm/explain | jq .
+else
+  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/$1/_ilm/explain | jq .[]
+fi
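Example invocations, assuming the rendered script is on the path; the index name in the second call is made up for illustration:

sudo so-elasticsearch-ilm-lifecycle-status                      # explain ILM state for all indices
sudo so-elasticsearch-ilm-lifecycle-status so-zeek-2023.01.01   # hypothetical single index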
@@ -1,12 +1,11 @@
 #!/bin/bash

 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 . /usr/sbin/so-common

-/usr/sbin/so-restart grafana $1
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X DELETE https://{{ NODEIP }}:9200/_ilm/policy/$1
21 salt/common/tools/sbin/so-elasticsearch-ilm-policy-load (Executable file)
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+
+{% import_yaml 'elasticsearch/defaults.yaml' as ESCONFIG with context %}
+{%- set ES_INDEX_SETTINGS = salt['pillar.get']('elasticsearch:index_settings', default=ESCONFIG.elasticsearch.index_settings, merge=True) %}
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+{%- for index, settings in ES_INDEX_SETTINGS.items() %}
+{%- if settings.policy is defined %}
+echo
+echo "Setting up {{ index }}-logs policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://{{ NODEIP }}:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+{%- endif %}
+{%- endfor %}
+echo
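For illustration, here is roughly what one rendered iteration of the loop could look like for a hypothetical so-zeek index entry whose pillar defines a policy; the IP and the policy body are invented, only the URL shape and the payload wrapper come from the template above:

curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://10.66.0.10:9200/_ilm/policy/so-zeek-logs" -H 'Content-Type: application/json' -d'{ "policy": { "phases": { "hot": { "actions": { "rollover": { "max_age": "1d" } } }, "delete": { "min_age": "30d", "actions": { "delete": {} } } } } }'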
15 salt/common/tools/sbin/so-elasticsearch-ilm-policy-view (Executable file)
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+if [ "$1" == "" ]; then
+  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/_ilm/policy | jq .
+else
+  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/_ilm/policy/$1 | jq .[]
+fi
@@ -1,12 +1,10 @@
 #!/bin/bash

 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 . /usr/sbin/so-common

-/usr/sbin/so-stop filebeat $1
+so-elasticsearch-ilm-stop
+so-elasticsearch-ilm-start
12 salt/common/tools/sbin/so-elasticsearch-ilm-start (Executable file)
@@ -0,0 +1,12 @@
+/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-common
+
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+echo "Starting ILM..."
+curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://{{ NODEIP }}:9200/_ilm/start
@@ -1,12 +1,11 @@
-#!/bin/bash
+/bin/bash

 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 . /usr/sbin/so-common

-/usr/sbin/so-restart filebeat $1
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/_ilm/status | jq .
@@ -1,12 +1,12 @@
 #!/bin/bash

 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 . /usr/sbin/so-common

-/usr/sbin/so-start filebeat $1
+{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
+
+echo "Stopping ILM..."
+curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://{{ NODEIP }}:9200/_ilm/stop
@@ -1,58 +0,0 @@
-#!/bin/bash
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{%- set mainint = salt['pillar.get']('host:mainint') %}
-{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
-
-default_conf_dir=/opt/so/conf
-ELASTICSEARCH_HOST="{{ MYIP }}"
-ELASTICSEARCH_PORT=9200
-#ELASTICSEARCH_AUTH=""
-
-# Define a default directory to load pipelines from
-FB_MODULE_YML="/usr/share/filebeat/module-setup.yml"
-
-# Wait for ElasticSearch to initialize
-echo -n "Waiting for ElasticSearch..."
-COUNT=0
-ELASTICSEARCH_CONNECTED="no"
-while [[ "$COUNT" -le 240 ]]; do
-  curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
-  if [ $? -eq 0 ]; then
-    ELASTICSEARCH_CONNECTED="yes"
-    echo "connected!"
-    break
-  else
-    ((COUNT+=1))
-    sleep 1
-    echo -n "."
-  fi
-done
-if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
-  echo
-  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
-  echo
-fi
-echo "Testing to see if the pipelines are already applied"
-ESVER=$(curl -K /opt/so/conf/elasticsearch/curl.config -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
-PIPELINES=$(curl -K /opt/so/conf/elasticsearch/curl.config -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-elasticsearch-server-pipeline | jq . | wc -c)
-
-if [[ "$PIPELINES" -lt 5 ]] || [ "$2" != "--force" ]; then
-  echo "Setting up ingest pipeline(s)"
-{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
-{%- for module in MODULESMERGED.modules.keys() %}
-{%- for fileset in MODULESMERGED.modules[module] %}
-  echo "{{ module }}.{{ fileset}}"
-  docker exec -i so-filebeat filebeat setup --pipelines --modules {{ module }} -M "{{ module }}.{{ fileset }}.enabled=true" -c $FB_MODULE_YML
-  sleep 0.5
-{% endfor %}
-{%- endfor %}
-else
-  exit 0
-fi
@@ -43,7 +43,7 @@ APPLY=${APPLY,,}
 function rolecall() {
   THEROLE=$1
-  THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval heavynodes idh manager receivers searchnodes sensors standalone strelka_frontend syslog"
+  THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval heavynodes idh manager managersearch receivers searchnodes sensors standalone strelka_frontend syslog"

   for AROLE in $THEROLES; do
     if [ "$AROLE" = "$THEROLE" ]; then
@@ -97,6 +97,8 @@ echo "$IP" >> $local_salt_dir/hostgroups/$ROLE
 if [ "$APPLY" = "true" ]; then
   echo "Applying the firewall rules"
   salt-call state.apply firewall queue=True
+  echo "Firewall rules have been applied... Review logs further if there were errors."
+  echo ""
 else
   echo "Firewall rules will be applied next salt run"
 fi
@@ -54,25 +54,25 @@ fi
 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
   so-firewall --role=manager --ip="$IP"
   so-firewall --role=sensors --ip="$IP"
-  so-firewall --apply --role=searchnodes --ip="$IP"
+  so-firewall --apply=true --role=searchnodes --ip="$IP"
   ;;
 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
   case "$ROLE" in
     'SENSOR')
-      so-firewall --apply --role=sensors --ip="$IP"
+      so-firewall --apply=true --role=sensors --ip="$IP"
       ;;
     'SEARCHNODE')
-      so-firewall --apply --role=searchnodes --ip="$IP"
+      so-firewall --apply=true --role=searchnodes --ip="$IP"
       ;;
     'HEAVYNODE')
       so-firewall --role=sensors --ip="$IP"
-      so-firewall --apply --role=heavynodes --ip="$IP"
+      so-firewall --apply=true --role=heavynodes --ip="$IP"
       ;;
     'IDH')
-      so-firewall --apply --role=beats_endpoint_ssl --ip="$IP"
+      so-firewall --apply=true --role=sensors --ip="$IP"
       ;;
     'RECEIVER')
-      so-firewall --apply --role=receivers --ip="$IP"
+      so-firewall --apply=true --role=receivers --ip="$IP"
       ;;
   esac
   ;;
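The calls above change only in passing an explicit value to --apply; a minimal sketch of the updated style, with a made-up documentation-range address:

so-firewall --apply=true --role=sensors --ip=192.0.2.50   # add the IP to the sensors hostgroup and apply firewall rules immediately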
@@ -1,20 +0,0 @@
-# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
-
-# Exit if an error occurs. The next highstate will retry.
-set -e
-
-folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
-delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
-
-for row in $folders; do
-  title=$(echo ${row} | base64 --decode | jq -r '.title')
-  uid=$(echo ${row} | base64 --decode | jq -r '.uid')
-
-  if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
-    curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
-  fi
-done
-
-echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
-
-exit 0
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-start grafana $1
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-stop grafana $1
@@ -25,7 +25,6 @@ container_list() {
 if [ $MANAGERCHECK == 'so-import' ]; then
   TRUSTED_CONTAINERS=(
     "so-elasticsearch"
-    "so-filebeat"
     "so-idstools"
     "so-influxdb"
     "so-kibana"
@@ -47,8 +46,6 @@ container_list() {
     "so-elastic-agent"
     "so-elastic-agent-builder"
     "so-elasticsearch"
-    "so-filebeat"
-    "so-grafana"
     "so-idh"
     "so-idstools"
     "so-influxdb"
@@ -73,7 +70,6 @@ container_list() {
   )
 else
   TRUSTED_CONTAINERS=(
-    "so-filebeat"
     "so-idstools"
     "so-elasticsearch"
     "so-logstash"
@@ -32,24 +32,13 @@ function evtx2es() {
 EVTX=$1
 HASH=$2

-ES_PASS=$(lookup_pillar "auth:users:so_elastic_user:pass" "elasticsearch")
-ES_USER=$(lookup_pillar "auth:users:so_elastic_user:user" "elasticsearch")
-
 docker run --rm \
--v "$EVTX:/tmp/$RUNID.evtx" \
---entrypoint evtx2es \
-{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
---host {{ MANAGERIP }} --scheme https \
---index so-beats-$INDEX_DATE --pipeline import.wel \
---login $ES_USER --pwd "$ES_PASS" \
-"/tmp/$RUNID.evtx" >> $LOG_FILE 2>&1
-
-docker run --rm \
--v "$EVTX:/tmp/import.evtx" \
+-v "$EVTX:/tmp/data.evtx" \
+-v "/nsm/import/$HASH/evtx/:/tmp/evtx/" \
 -v "/nsm/import/evtx-end_newest:/tmp/newest" \
 -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
---entrypoint '/evtx_calc_timestamps.sh' \
-{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
+--entrypoint "/evtx_calc_timestamps.sh" \
+{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} >> $LOG_FILE 2>&1
 }

 # if no parameters supplied, display usage
|
 # generate IDS alerts and write them to standard pipeline
 echo "- analyzing traffic with Suricata"
 suricata "${PCAP}" $HASH
-# generate Zeek logs and write them to a unique subdirectory in /nsm/import/bro/
+{% if salt['pillar.get']('global:mdengine') == 'ZEEK' %}
+# generate Zeek logs and write them to a unique subdirectory in /nsm/import/zeek/
 # since each run writes to a unique subdirectory, there is no need for a lock file
 echo "- analyzing traffic with Zeek"
 zeek "${PCAP}" $HASH
+{% endif %}

 START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
 END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
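The new guard renders based on the global mdengine pillar value; a quick way to see what the conditional will evaluate to on a given node, assuming a standard Salt minion setup:

sudo salt-call pillar.get global:mdengine   # prints ZEEK when the Zeek branch above will be rendered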
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-. /usr/sbin/so-common
-
-wdurregex="^[0-9]+w$"
-ddurregex="^[0-9]+d$"
-
-echo -e "\nThis script is used to reduce the size of InfluxDB by removing old data and retaining only the duration specified."
-echo "The duration will need to be specified as an integer followed by the duration unit without a space."
-echo -e "\nFor example, to purge all data but retain the past 12 weeks, specify 12w for the duration."
-echo "The duration units are as follows:"
-echo " w - week(s)"
-echo " d - day(s)"
-
-while true; do
-  echo ""
-  read -p 'Enter the duration of past data that you would like to retain: ' duration
-  duration=$(echo $duration | tr '[:upper:]' '[:lower:]')
-
-  if [[ "$duration" =~ $wdurregex ]] || [[ "$duration" =~ $ddurregex ]]; then
-    break
-  fi
-
-  echo -e "\nInvalid duration."
-done
-
-echo -e "\nInfluxDB will now be cleaned and leave only the past $duration worth of data."
-read -r -p "Are you sure you want to continue? [y/N] " yorn
-if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
-  echo -e "\nCleaning InfluxDb and saving only the past $duration. This may could take several minutes depending on how much data needs to be cleaned."
-  if docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE \"time\" >= '2020-01-01T00:00:00.0000000Z' AND \"time\" <= now() - $duration\""; then
-    echo -e "\nInfluxDb clean complete."
-  else
-    echo -e "\nSomething went wrong with cleaning InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
-  fi
-else
-  echo -e "\nExiting as requested."
-fi
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{%- set role = grains.id.split('_') | last %}
-{%- if role in ['manager', 'managersearch', 'eval', 'standalone'] %}
-{%- import_yaml 'influxdb/defaults.yaml' as default_settings %}
-{%- set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %}
-
-. /usr/sbin/so-common
-
-echo -e "\nThis script is used to reduce the size of InfluxDB by downsampling old data into the so_long_term retention policy."
-
-echo -e "\nInfluxDB will now be downsampled. This could take a few hours depending on how large the database is and hardware resources available."
-read -r -p "Are you sure you want to continue? [y/N] " yorn
-if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
-  echo -e "\nDownsampling InfluxDb started at `date`. This may take several hours depending on how much data needs to be downsampled."
-
-{% for dest_rp in influxdb.downsample.keys() -%}
-{% for measurement in influxdb.downsample[dest_rp].get('measurements', []) -%}
-
-  day=0
-  startdate=`date`
-  while docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"SELECT mean(*) INTO \"so_long_term\".\"{{measurement}}\" FROM \"autogen\".\"{{measurement}}\" WHERE \"time\" >= '2020-07-21T00:00:00.0000000Z' + ${day}d AND \"time\" <= '2020-07-21T00:00:00.0000000Z' + $((day+1))d GROUP BY time(5m),*\""; do
-    # why 2020-07-21?
-    migrationdate=`date -d "2020-07-21 + ${day} days" +"%y-%m-%d"`
-
-    echo "Downsampling of measurement: {{measurement}} from $migrationdate started at $startdate and completed at `date`."
-
-    newdaytomigrate=$(date -d "$migrationdate + 1 days" +"%s")
-    today=$(date +"%s")
-    if [ $newdaytomigrate -ge $today ]; then
-      break
-    else
-      ((day=day+1))
-      startdate=`date`
-      echo -e "\nDownsampling the next day's worth of data for measurement: {{measurement}}."
-    fi
-  done
-
-{% endfor -%}
-{% endfor -%}
-
-  echo -e "\nInfluxDb data downsampling complete."
-
-else
-  echo -e "\nExiting as requested."
-fi
-{%- else %}
-echo -e "\nThis script can only be run on a node running InfluxDB."
-{%- endif %}
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-. /usr/sbin/so-common
-
-echo -e "\nThis script is used to reduce the size of InfluxDB by dropping the autogen retention policy."
-echo "If you want to retain historical data prior to 2.3.60, then this should only be run after you have downsampled your data using so-influxdb-downsample."
-
-echo -e "\nThe autogen retention policy will now be dropped from InfluxDB."
-read -r -p "Are you sure you want to continue? [y/N] " yorn
-if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
-  echo -e "\nDropping autogen retention policy."
-  if docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -execute "drop retention policy autogen on telegraf"; then
-    echo -e "\nAutogen retention policy dropped from InfluxDb."
-  else
-    echo -e "\nSomething went wrong dropping then autogen retention policy from InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
-  fi
-else
-  echo -e "\nExiting as requested."
-fi
285 salt/common/tools/sbin/so-influxdb-manage (Normal file)
@@ -0,0 +1,285 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
echo "Usage: $0 <operation> [args]"
|
||||||
|
echo ""
|
||||||
|
echo "Supported Operations:"
|
||||||
|
echo " dashboardpath Returns the URL path for a dashboard, requires: <name-of-dashboard>"
|
||||||
|
echo " export Exports all templates to stdout"
|
||||||
|
echo " setup Loads all templates and creates all required buckets"
|
||||||
|
echo " userlist Lists users"
|
||||||
|
echo " useradd Adds a new user, requires: <email>"
|
||||||
|
echo " userdel Removes an existing user, requires: <email>"
|
||||||
|
echo " userenable Enables a user, requires: <email>"
|
||||||
|
echo " userdisable Disables a user, requires: <email>"
|
||||||
|
echo " userpass Updates a user's password, requires: <email>"
|
||||||
|
echo " userpromote Promotes a user to admin: <email>"
|
||||||
|
echo " userdemote Demotes a user from admin: <email>"
|
||||||
|
echo ""
|
||||||
|
echo "If required, the password will be read from STDIN."
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ $# -lt 1 ]; then
|
||||||
|
usage
|
||||||
|
fi
|
||||||
|
|
||||||
|
COMMAND=$(basename $0)
|
||||||
|
OP=$1
|
||||||
|
shift
|
||||||
|
|
||||||
|
set -eo pipefail
|
||||||
|
|
||||||
|
log() {
|
||||||
|
echo -e "$(date) | $COMMAND | $@" >&2
|
||||||
|
}
|
||||||
|
|
||||||
|
check_response() {
|
||||||
|
response=$1
|
||||||
|
if [[ "$response" =~ "\"code\":" ]]; then
|
||||||
|
log "Failed. Check the response for more details.\n$response"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
request() {
|
||||||
|
curl -skK /opt/so/conf/influxdb/curl.config "https://localhost:8086/api/v2/$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
lookup_user_id() {
|
||||||
|
email=$1
|
||||||
|
|
||||||
|
response=$(request users?limit=100)
|
||||||
|
check_response "$response"
|
||||||
|
uid=$(echo "$response" | jq -r ".users[] | select(.name == \"$email\").id")
|
||||||
|
if [[ -z "$uid" ]]; then
|
||||||
|
log "User not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "$uid"
|
||||||
|
}
|
||||||
|
|
||||||
|
lookup_stack_id() {
|
||||||
|
oid=$1
|
||||||
|
|
||||||
|
response=$(request "stacks?orgID=$oid&name=Security+Onion")
|
||||||
|
check_response "$response"
|
||||||
|
stackid=$(echo "$response" | jq -r ".stacks[0].id")
|
||||||
|
if [[ -z "$stackid" || "$stackid" == null ]]; then
|
||||||
|
response=$(request stacks -X POST -d "{\"name\":\"Security Onion\",\"orgID\":\"$oid\"}")
|
||||||
|
check_response "$response"
|
||||||
|
stackid=$(echo "$response" | jq -r .id)
|
||||||
|
fi
|
||||||
|
echo "$stackid"
|
||||||
|
}
|
||||||
|
|
||||||
|
change_password() {
|
||||||
|
uid=$1
|
||||||
|
|
||||||
|
set +e
|
||||||
|
test -t 0
|
||||||
|
if [[ $? == 0 ]]; then
|
||||||
|
echo "Enter new password:"
|
||||||
|
fi
|
||||||
|
set -e
|
||||||
|
read -rs pass
|
||||||
|
check_password_and_exit "$pass"
|
||||||
|
response=$(request users/$uid/password -X POST -d "{\"password\":\"$pass\"}")
|
||||||
|
check_response "$response"
|
||||||
|
}
|
||||||
|
|
||||||
|
apply_templates() {
  oid=$1
  stackid=$2
  template_objects_array=$3

  body="{\"orgID\":\"$oid\",\"stackID\":\"$stackid\",\"templates\":$template_objects_array}"
  response=$(request templates/apply -X POST -d "$body")
  check_response "$response"
}

setup_bucket() {
  oid=$1
  name=$2
  age=$3
  shardduration=$4

  response=$(request "buckets?orgID=$oid&name=$name")
  bucketid=$(echo "$response" | jq -r ".buckets[0].id")
  if [[ -z "$bucketid" || "$bucketid" == null ]]; then
    response=$(request buckets -X POST -d "{\"name\":\"$name\",\"orgID\":\"$oid\"}")
    check_response "$response"
    bucketid=$(echo "$response" | jq -r .id)
  fi
  response=$(request buckets/$bucketid -X PATCH -d "{\"name\":\"$name\",\"retentionRules\":[{\"everySeconds\":$age,\"shardGroupDurationSeconds\":$shardduration,\"type\":\"expire\"}]}")
  check_response "$response"
}

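# Note: retention is applied as an "expire" rule and both durations are in seconds.
# Illustrative values only: age=604800 with shardduration=86400 would keep one week
# of data in daily shard groups.
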
lookup_org_id_with_wait() {
  max_attempts=30
  attempts=0
  wait=10
  while [[ $attempts -lt $max_attempts ]]; do
    response=$(request orgs?org=Security+Onion)
    oid=$(echo "$response" | jq -r ".orgs[] | select(.name == \"Security Onion\").id")
    if [[ -z $oid ]]; then
      attempts=$((attempts+1))
      log "Server does not appear to be running or fully initialized - will try again in $wait seconds ($attempts / $max_attempts)"
      sleep $wait
    else
      echo "$oid"
      return
    fi
  done

  log "Server has not started after $max_attempts attempts - aborting"
  exit 1
}

oid=$(lookup_org_id_with_wait)

case "$OP" in
|
||||||
|
|
||||||
|
setup)
|
||||||
|
log "Ensuring organization is setup correctly"
|
||||||
|
|
||||||
|
# Load templates if at least one has been modified since the last setup
|
||||||
|
newest=$(ls -1t /opt/so/conf/influxdb/templates/ | head -1)
|
||||||
|
if [ /opt/so/conf/influxdb/templates/$newest -nt /opt/so/conf/influxdb/last_template_setup ]; then
|
||||||
|
log "Updating templates"
|
||||||
|
stackid=$(lookup_stack_id "$oid")
|
||||||
|
for file in /opt/so/conf/influxdb/templates/*; do
|
||||||
|
if [[ "$templates_array" != "" ]]; then
|
||||||
|
templates_array="$templates_array,"
|
||||||
|
fi
|
||||||
|
template=$(cat "$file")
|
||||||
|
templates_array="$templates_array{\"contents\":$template}"
|
||||||
|
done
|
||||||
|
apply_templates "$oid" "$stackid" "[$templates_array]"
|
||||||
|
echo $(date) > /opt/so/conf/influxdb/last_template_setup
|
||||||
|
else
|
||||||
|
log "Templates have not been modified since last setup"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Setup buckets and retention periods if at least one has been modified since the last setup
|
||||||
|
if [ /opt/so/conf/influxdb/buckets.json -nt /opt/so/conf/influxdb/last_bucket_setup ]; then
|
||||||
|
log "Updating buckets and retention periods"
|
||||||
|
for rp in so_short_term so_long_term; do
|
||||||
|
bucket=telegraf/$rp
|
||||||
|
log "Ensuring bucket is created and configured; bucket=$bucket"
|
||||||
|
age=$(cat /opt/so/conf/influxdb/buckets.json | jq -r .$rp.duration)
|
||||||
|
shard_duration=$(cat /opt/so/conf/influxdb/buckets.json | jq -r .$rp.shard_duration)
|
||||||
|
setup_bucket "$oid" "$bucket" "$age" "$shard_duration"
|
||||||
|
done
|
||||||
|
echo $(date) > /opt/so/conf/influxdb/last_bucket_setup
|
||||||
|
else
|
||||||
|
log "Buckets have not been modified since last setup"
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
|
||||||
|
  userlist)
    log "Listing existing users"
    response=$(request users)
    check_response "$response"
    echo "$response" | jq -r '.users[] | "\(.id): \(.name) (\(.status))"'
    ;;

  useradd)
    [ $# -ne 1 ] && usage
    email=$1
    log "Adding new user; email=$email"
    response=$(request users -X POST -d "{\"name\":\"$email\"}")
    check_response "$response"
    uid=$(echo "$response" | jq -r .id)

    log "Adding new user to organization"
    response=$(request orgs/$oid/members -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"

    change_password "$uid"
    ;;

  userpass)
    [ $# -ne 1 ] && usage
    email=$1
    log "Updating user password; email=$email"
    uid=$(lookup_user_id "$email")
    change_password "$uid"
    ;;

  userdel)
    [ $# -ne 1 ] && usage
    email=$1
    log "Deleting user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X DELETE)
    check_response "$response"
    ;;

  userenable)
    [ $# -ne 1 ] && usage
    email=$1
    log "Enabling user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X PATCH -d "{\"name\":\"$email\",\"status\":\"active\"}")
    check_response "$response"
    ;;

  userdisable)
    [ $# -ne 1 ] && usage
    email=$1
    log "Disabling user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X PATCH -d "{\"name\":\"$email\",\"status\":\"inactive\"}")
    check_response "$response"
    ;;

  userpromote)
    [ $# -ne 1 ] && usage
    email=$1
    log "Promoting user to admin; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request orgs/$oid/members/$uid -X DELETE)
    response=$(request orgs/$oid/owners -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"
    ;;

  userdemote)
    [ $# -ne 1 ] && usage
    email=$1
    log "Demoting user from admin; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request orgs/$oid/owners/$uid -X DELETE)
    response=$(request orgs/$oid/members -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"
    ;;

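  # Note: InfluxDB v2 models admin rights as organization ownership, so promoting or
  # demoting a user moves it between the org's owners and members collections.
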
  export)
    log "Exporting all organization templates"
    request templates/export -X POST -d "{\"orgIDs\":[{\"orgID\":\"$oid\"}]}" -H "Content-Type: application/json"
    ;;

  dashboardpath)
    [ $# -ne 1 ] && usage
    name=$1
    response=$(request "dashboards?limit=100&orgID=$oid")
    check_response "$response"
    dbid=$(echo "$response" | jq -r ".dashboards[] | select(.name == \"$name\").id")
    if [[ -z "$dbid" ]]; then
      log "Dashboard not found"
      exit 1
    fi
    echo -n "/influxdb/orgs/$oid/dashboards/$dbid"
    ;;

  *)
    usage
    ;;
esac
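# Hypothetical invocations, for illustration only (not part of this commit):
#   so-influxdb-manage userlist
#   echo "$newpass" | so-influxdb-manage useradd analyst@example.com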
@@ -13,6 +13,6 @@ echo "Setting up default Space:"
 {% if HIGHLANDER %}
 curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
 {% else %}
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet","fleetv2","securitySolutionCases"]} ' >> /opt/so/log/kibana/misc.log
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCases"]} ' >> /opt/so/log/kibana/misc.log
 {% endif %}
 echo
@@ -23,6 +23,7 @@ if [[ $# -lt 1 ]]; then
   echo " accept: Accepts a new key and adds the minion files"
   echo " delete: Removes the key and deletes the minion files"
   echo " reject: Rejects a key"
+  echo " test: Perform minion test"
   echo ""
   exit 1
 fi
@@ -75,6 +76,22 @@ function getinstallinfo() {
   source <(echo $INSTALLVARS)
 }
 
+function testminion() {
+  # Always run on the host, since this is going to be the manager of a distributed grid, or an eval/standalone.
+  # Distributed managers must run this in order for the sensor nodes to have access to the so-tcpreplay image.
+  so-test
+  result=$?
+
+  # If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well
+  local_id=$(lookup_grain id)
+  if [[ ! "$local_id" =~ "${MINION_ID}_" ]]; then
+    salt "$MINION_ID" cmd.run 'so-test'
+    result=$?
+  fi
+
+  exit $result
+}
+
 function listminions() {
   salt-key list -F --out=json
   exit $?
@@ -112,10 +129,6 @@ function add_elastic_to_minion() {
   printf '%s\n'\
   "elasticsearch:"\
   "  esheap: '$ES_HEAP_SIZE'"\
-  "  config:"\
-  "    node:"\
-  "      attr:"\
-  "        box_type: hot"\
   "  " >> $PILLARFILE
 }
 
@@ -138,6 +151,18 @@ function add_fleet_to_minion() {
 }
 
+
+# Add IDH Services info to the minion file
+function add_idh_to_minion() {
+  printf '%s\n'\
+  "idh:"\
+  "  restrict_management_ip: $IDH_MGTRESTRICT"\
+  "  services:" >> "$PILLARFILE"
+  IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
+  for service in ${IDH_SERVICES_ARRAY[@]}; do
+    echo "    - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
+  done
+}
+
 function add_logstash_to_minion() {
   # Create the logstash advanced pillar
   printf '%s\n'\
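# Note: the add_*_to_minion helpers append YAML fragments to the minion pillar file,
# so the indentation inside the quoted strings is significant to the pillar parser.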
@@ -196,6 +221,12 @@ function add_sensor_to_minion() {
   echo "  enabled: True" >> $PILLARFILE
 }
 
+function updateMine() {
+  salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
+}
+
+function apply_ES_state() {
+  salt-call state.apply elasticsearch concurrent=True
+}
+
 function createEVAL() {
   add_elastic_to_minion
   add_logstash_to_minion
@@ -207,8 +238,13 @@ function createFLEET() {
   add_logstash_to_minion
 }
 
-function createIDHNODE() {
-  echo "Nothing custom needed for IDH nodes"
+function createFLEET() {
+  add_fleet_to_minion
+  add_logstash_to_minion
+}
+
+function createIDH() {
+  add_idh_to_minion
 }
 
 function createIMPORT() {
@@ -240,6 +276,8 @@ function createSENSOR() {
 function createSEARCHNODE() {
   add_elastic_to_minion
   add_logstash_to_minion
+  updateMine
+  apply_ES_state
 }
 
 function createSTANDALONE() {
@@ -289,3 +327,7 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
   create$NODETYPE
   echo "Minion file created for $MINION_ID"
 fi
+
+if [[ "$OPERATION" = 'test' ]]; then
+  testminion
+fi
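# Note: testminion propagates so-test's exit status; when the target minion is remote,
# the exit status of the remote salt cmd.run invocation takes precedence.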
@@ -17,6 +17,6 @@ salt-call state.apply playbook,playbook.automation_user_create
 
 /usr/sbin/so-soctopus-restart
 
-echo "Importing Plays - this will take some time...."
+echo "Importing Plays - NOTE: this will continue after installation finishes and could take an hour or more. Rebooting while the import is in progress will delay playbook imports."
 sleep 5
 so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 &
@@ -7,7 +7,7 @@
 
 
 
-# Usage: so-restart filebeat | kibana | playbook
+# Usage: so-restart kibana | playbook
 
 . /usr/sbin/so-common
 
@@ -27,5 +27,5 @@ if [ $# -ge 1 ]; then
   *) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart filebeat, or so-filebeat-restart\n"
+  echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart logstash, or so-logstash-restart\n"
 fi
@@ -7,7 +7,7 @@
 
 
 
-# Usage: so-start all | filebeat | kibana | playbook
+# Usage: so-start all | kibana | playbook
 
 . /usr/sbin/so-common
 
@@ -27,5 +27,5 @@ if [ $# -ge 1 ]; then
   *) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start filebeat, or so-filebeat-start\n"
+  echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start logstash, or so-logstash-start\n"
 fi
@@ -24,12 +24,14 @@ def showUsage(options, args):
     print('  -h  - Prints this usage information')
     print('  -q  - Suppress output; useful for automation of exit code value')
     print('  -j  - Output in JSON format')
+    print('  -i  - Consider the installation outcome regardless of whether the system appears healthy')
     print('')
     print('  Exit codes:')
     print('      0 - Success, system appears to be running correctly')
     print('      1 - Error, one or more subsystems are not running')
     print('      2 - System is starting')
     print('     99 - Installation in progress')
+    print('    100 - System installation encountered errors')
     sys.exit(1)
 
 
@@ -38,6 +40,16 @@ def fail(msg):
     sys.exit(1)
 
 
+def check_installation_status(options, console):
+    if "-i" in options:
+        if os.path.isfile('/root/failure'):
+            return 100
+        if os.path.isfile('/root/success'):
+            return 0
+        return 99
+    return 0
+
+
 def check_system_status(options, console):
     code = 0
     highstate_end_time = 0
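# Note: with -i, the /root/failure and /root/success files act as installation outcome
# markers; if neither exists, the install is treated as still in progress (exit code 99).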
@@ -66,6 +78,8 @@ def output(options, console, code, data):
         console.print(" [bold yellow]:hourglass: [bold white]System appears to be starting. No highstate has completed since the system was restarted.")
     elif code == 99:
         console.print(" [bold red]:exclamation: [bold white]Installation does not appear to be complete. A highstate has not fully completed.")
+    elif code == 100:
+        console.print(" [bold red]:exclamation: [bold white]Installation encountered errors.")
     else:
         table = Table(title = "Security Onion Status", show_edge = False, safe_box = True, box = box.MINIMAL)
         table.add_column("Container", justify="right", style="white", no_wrap=True)
@@ -137,7 +151,9 @@ def check_container_status(options, console):
 
 def check_status(options, console):
     container_list = []
-    code = check_system_status(options, console)
+    code = check_installation_status(options, console)
+    if code == 0:
+        code = check_system_status(options, console)
     if code == 0:
         code, container_list = check_container_status(options, console)
     output(options, console, code, container_list)
@@ -150,9 +166,8 @@ def main():
     for option in args:
         if option.startswith("-"):
             options.append(option)
-            args.remove(option)
 
-    if len(args) != 0 or "-h" in options:
+    if "-h" in options or "--help" in options or "-?" in options:
         showUsage(options, None)
 
     if os.environ["USER"] != "root":
@@ -7,7 +7,7 @@
 
 
 
-# Usage: so-stop filebeat | kibana | playbook | thehive
+# Usage: so-stop kibana | playbook | thehive
 
 . /usr/sbin/so-common
 
@@ -20,6 +20,6 @@ if [ $# -ge 1 ]; then
   *) docker stop so-$1 ; docker rm so-$1 ;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop filebeat, or so-filebeat-stop\n"
+  echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop logstash, or so-logstash-stop\n"
 fi
@@ -13,74 +13,74 @@ DEFAULT_ROLE=analyst
 
 function usage() {
   cat <<USAGE_EOF
-Usage: $0 <operation> [supporting parameters]"
+Usage: $0 <operation> [supporting parameters]
 
-  where <operation> is one of the following:"
+  where <operation> is one of the following:
 
-    list: Lists all user email addresses currently defined in the identity system"
+    list: Lists all user email addresses currently defined in the identity system
 
-    add: Adds a new user to the identity system"
+    add: Adds a new user to the identity system
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-      Optional parameters: "
+      Optional parameters:
-        --role <role> (defaults to $DEFAULT_ROLE)"
+        --role <role> (defaults to $DEFAULT_ROLE)
-        --firstName <firstName> (defaults to blank)"
+        --firstName <firstName> (defaults to blank)
-        --lastName <lastName> (defaults to blank)"
+        --lastName <lastName> (defaults to blank)
-        --note <note> (defaults to blank)"
+        --note <note> (defaults to blank)
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    addrole: Grants a role to an existing user"
+    addrole: Grants a role to an existing user
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-        --role <role>"
+        --role <role>
-      Optional parameters: "
+      Optional parameters:
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    delrole: Removes a role from an existing user"
+    delrole: Removes a role from an existing user
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-        --role <role>"
+        --role <role>
-      Optional parameters: "
+      Optional parameters:
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    password: Updates a user's password and disables MFA"
+    password: Updates a user's password and disables MFA
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-      Optional parameters: "
+      Optional parameters:
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    profile: Updates a user's profile information"
+    profile: Updates a user's profile information
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-      Optional parameters: "
+      Optional parameters:
-        --role <role> (defaults to $DEFAULT_ROLE)"
+        --role <role> (defaults to $DEFAULT_ROLE)
-        --firstName <firstName> (defaults to blank)"
+        --firstName <firstName> (defaults to blank)
-        --lastName <lastName> (defaults to blank)"
+        --lastName <lastName> (defaults to blank)
-        --note <note> (defaults to blank)"
+        --note <note> (defaults to blank)
 
-    enable: Enables a user"
+    enable: Enables a user
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-      Optional parameters: "
+      Optional parameters:
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    disable: Disables a user"
+    disable: Disables a user
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
-      Optional parameters: "
+      Optional parameters:
         --skip-sync (defers the Elastic sync until the next scheduled time)
 
-    validate: Validates that the given email address and password are acceptable"
+    validate: Validates that the given email address and password are acceptable
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
 
-    valemail: Validates that the given email address is acceptable; requires 'email' parameter"
+    valemail: Validates that the given email address is acceptable; requires 'email' parameter
-      Required parameters: "
+      Required parameters:
-        --email <email>"
+        --email <email>
 
-    valpass: Validates that a password is acceptable"
+    valpass: Validates that a password is acceptable
 
-  Note that the password can be piped into STDIN to avoid prompting for it"
+  Note that the password can be piped into STDIN to avoid prompting for it
 USAGE_EOF
   exit 1
 }
@@ -496,19 +496,6 @@ EOF
   fi
 }
 
-function migrateLockedUsers() {
-  # This is a migration function to convert locked users from prior to 2.3.90
-  # to inactive users using the newer Kratos functionality. This should only
-  # find locked users once.
-  lockedEmails=$(curl -s ${kratosUrl}/identities | jq -r '.[] | select(.traits.status == "locked") | .traits.email')
-  if [[ -n "$lockedEmails" ]]; then
-    echo "Disabling locked users..."
-    for email in $lockedEmails; do
-      updateStatus "$email" locked
-    done
-  fi
-}
-
 function updateStatus() {
   email=$1
   status=$2
@@ -587,7 +574,10 @@ case "${operation}" in
     createUser "$email" "${role:-$DEFAULT_ROLE}" "${firstName}" "${lastName}" "${note}"
     syncAll
     echo "Successfully added new user to SOC"
-    check_container fleet && echo "$password" | so-fleet-user-add "$email"
+    echo "$password" | so-influxdb-manage useradd "$email"
+    if [[ "$role" == "superuser" ]]; then
+      echo "$password" | so-influxdb-manage userpromote "$email"
+    fi
     ;;
 
   "list")
|
|||||||
if addUserRole "$email" "$role"; then
|
if addUserRole "$email" "$role"; then
|
||||||
syncElastic
|
syncElastic
|
||||||
echo "Successfully added role to user"
|
echo "Successfully added role to user"
|
||||||
|
if [[ "$role" == "superuser" ]]; then
|
||||||
|
echo "$password" | so-influxdb-manage userpromote "$email"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
||||||
@@ -618,6 +611,9 @@ case "${operation}" in
     deleteUserRole "$email" "$role"
     syncElastic
     echo "Successfully removed role from user"
+    if [[ "$role" == "superuser" ]]; then
+      echo "$password" | so-influxdb-manage userdemote "$email"
+    fi
     ;;
 
   "password")
|
|||||||
updateUserPassword "$email"
|
updateUserPassword "$email"
|
||||||
syncAll
|
syncAll
|
||||||
echo "Successfully updated user password"
|
echo "Successfully updated user password"
|
||||||
|
echo "$password" | so-influxdb-manage userpass "$email"
|
||||||
;;
|
;;
|
||||||
|
|
||||||
"profile")
|
"profile")
|
||||||
@@ -647,7 +644,7 @@ case "${operation}" in
     updateStatus "$email" 'active'
     syncAll
     echo "Successfully enabled user"
-    echo "Fleet user will need to be recreated manually with so-fleet-user-add"
+    so-influxdb-manage userenable "$email"
     ;;
 
   "disable")
@@ -658,7 +655,7 @@ case "${operation}" in
     updateStatus "$email" 'locked'
     syncAll
     echo "Successfully disabled user"
-    check_container fleet && so-fleet-user-delete "$email"
+    so-influxdb-manage userdisable "$email"
     ;;
 
   "delete")
@@ -669,7 +666,7 @@ case "${operation}" in
     deleteUser "$email"
     syncAll
     echo "Successfully deleted user"
-    check_container fleet && so-fleet-user-delete "$email"
+    so-influxdb-manage userdel "$email"
     ;;
 
   "sync")
@@ -693,11 +690,6 @@ case "${operation}" in
     echo "Password is acceptable"
     ;;
 
-  "migrate")
-    migrateLockedUsers
-    echo "User migration complete"
-    ;;
-
   *)
     fail "Unsupported operation: $operation"
     usage
@@ -340,7 +340,7 @@ check_os_updates() {
 clean_dockers() {
   # Place Holder for cleaning up old docker images
   echo "Trying to clean up old dockers."
-  docker system prune -a -f
+  docker system prune -a -f --volumes
 
 }
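# Note: the added --volumes flag makes the prune remove unused local volumes as well,
# not just stopped containers, dangling images, and unused networks.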
@@ -1026,8 +1026,6 @@ upgrade_salt() {
   else
     echo "Salt upgrade success."
     echo ""
-    echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again."
-    rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched
   fi
 
 }
@@ -1138,9 +1136,7 @@ fix_wazuh() {
     # Update the repo files so it can actually upgrade
     upgrade_salt
   fi
-  rm -f /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdbmod.py.patched /opt/so/state/influxdb_retention_policy.py.patched
   systemctl_func "start" "salt-master"
-  salt-call state.apply salt.python3-influxdb -l info
   systemctl_func "start" "salt-minion"
   systemctl_func "start" "$cron_service_name"
 
@@ -1309,11 +1305,6 @@ main() {
   salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
   set -e
 
-  echo ""
-  echo "Ensuring python modules for Salt are installed and patched."
-  salt-call state.apply salt.python3-influxdb -l info queue=True
-  echo ""
-
   # update the salt-minion configs here and start the minion
   # since highstates are disabled above, a minion start should not trigger a highstate
   echo ""
@@ -1,182 +1,98 @@
 elasticsearch:
   index_settings:
-    so-aws:
-      warm: 7
+    logs-import-so:
+      close: 73000
+      delete: 73001
+    logs-strelka-so:
       close: 30
       delete: 365
-    so-azure:
-      warm: 7
+    logs-suricata-so:
       close: 30
       delete: 365
-    so-barracuda:
-      warm: 7
+    logs-syslog-so:
+      close: 30
+      delete: 365
+    logs-zeek-so:
+      close: 30
+      delete: 365
+    logs-elastic_agent-metricbeat-default:
+      close: 30
+      delete: 365
+    logs-elastic_agent-osquerybeat-default:
+      close: 30
+      delete: 365
+    logs-elastic_agent-fleet_server-default:
+      close: 30
+      delete: 365
+    logs-elastic_agent-filebeat-default:
+      close: 30
+      delete: 365
+    logs-elastic_agent-default:
+      close: 30
+      delete: 365
+    logs-system-auth-default:
+      close: 30
+      delete: 365
+    logs-system-application-default:
+      close: 30
+      delete: 365
+    logs-system-security-default:
+      close: 30
+      delete: 365
+    logs-system-system-default:
+      close: 30
+      delete: 365
+    logs-system-syslog-default:
+      close: 30
+      delete: 365
+    logs-windows-powershell-default:
+      close: 30
+      delete: 365
+    logs-windows-sysmon_operational-default:
       close: 30
       delete: 365
     so-beats:
-      warm: 7
-      close: 30
-      delete: 365
-    so-bluecoat:
-      warm: 7
-      close: 30
-      delete: 365
-    so-cef:
-      warm: 7
-      close: 30
-      delete: 365
-    so-checkpoint:
-      warm: 7
-      close: 30
-      delete: 365
-    so-cisco:
-      warm: 7
-      close: 30
-      delete: 365
-    so-cyberark:
-      warm: 7
-      close: 30
-      delete: 365
-    so-cylance:
-      warm: 7
       close: 30
       delete: 365
     so-elasticsearch:
-      warm: 7
-      close: 30
-      delete: 365
-    so-endgame:
-      warm: 7
-      close: 30
-      delete: 365
-    so-f5:
-      warm: 7
       close: 30
       delete: 365
     so-firewall:
-      warm: 7
-      close: 30
-      delete: 365
-    so-fortinet:
-      warm: 7
-      close: 30
-      delete: 365
-    so-gcp:
-      warm: 7
-      close: 30
-      delete: 365
-    so-google_workspace:
-      warm: 7
       close: 30
       delete: 365
     so-ids:
-      warm: 7
-      close: 30
-      delete: 365
-    so-imperva:
-      warm: 7
       close: 30
       delete: 365
     so-import:
-      warm: 7
       close: 73000
       delete: 73001
-    so-infoblox:
-      warm: 7
-      close: 30
-      delete: 365
-    so-juniper:
-      warm: 7
-      close: 30
-      delete: 365
     so-kratos:
-      warm: 7
       close: 30
       delete: 365
     so-kibana:
-      warm: 7
       close: 30
       delete: 365
     so-logstash:
-      warm: 7
-      close: 30
-      delete: 365
-    so-microsoft:
-      warm: 7
-      close: 30
-      delete: 365
-    so-misp:
-      warm: 7
       close: 30
       delete: 365
     so-netflow:
-      warm: 7
-      close: 30
-      delete: 365
-    so-netscout:
-      warm: 7
-      close: 30
-      delete: 365
-    so-o365:
-      warm: 7
-      close: 30
-      delete: 365
-    so-okta:
-      warm: 7
       close: 30
       delete: 365
     so-osquery:
-      warm: 7
       close: 30
       delete: 365
     so-ossec:
-      warm: 7
-      close: 30
-      delete: 365
-    so-proofpoint:
-      warm: 7
-      close: 30
-      delete: 365
-    so-radware:
-      warm: 7
       close: 30
       delete: 365
     so-redis:
-      warm: 7
-      close: 30
-      delete: 365
-    so-snort:
-      warm: 7
-      close: 30
-      delete: 365
-    so-snyk:
-      warm: 7
-      close: 30
-      delete: 365
-    so-sonicwall:
-      warm: 7
-      close: 30
-      delete: 365
-    so-sophos:
-      warm: 7
       close: 30
       delete: 365
     so-strelka:
-      warm: 7
       close: 30
       delete: 365
     so-syslog:
-      warm: 7
-      close: 30
-      delete: 365
-    so-tomcat:
-      warm: 7
       close: 30
       delete: 365
     so-zeek:
-      warm: 7
-      close: 30
-      delete: 365
-    so-zscaler:
-      warm: 7
       close: 30
       delete: 365
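# Note: close and delete above are ages in days; the 73000/73001 values on the import
# indices effectively disable curation for imported data, and the warm tier setting is
# dropped entirely in this commit. A local pillar override might look like this
# (values illustrative only):
#   elasticsearch:
#     index_settings:
#       logs-zeek-so:
#         delete: 30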
@@ -3,6 +3,11 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
+
+{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %}
+{% set ELASTICMERGED = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) %}
+
+{{ ELASTICMERGED.retention_pct }}
 
 {%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit') %}
 actions:
   1:
@@ -15,7 +20,7 @@ actions:
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-.*|so-.*)$'
+      value: '^(logstash-.*|so-.*|.ds-logs-.*-so.*)$'
    - filtertype: pattern
      kind: regex
      value: '^(so-case.*)$'
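# Note: the added .ds-logs-.*-so.* alternation matches the backing indices that
# Elasticsearch creates for data streams, so size-based curation now covers those too.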
@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set cur_close_days = CURATORMERGED['so-barracuda'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-elastic_agent-default'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close barracuda indices older than {{cur_close_days}} days.
+      Close Elastic Agent default indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
     - filtertype: pattern
      kind: regex
-      value: '^(logstash-barracuda.*|so-barracuda.*)$'
+      value: '^(.ds-logs-elastic_agent-default.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set DELETE_DAYS = CURATORMERGED['so-azure'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-elastic_agent-default'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete azure indices when older than {{ DELETE_DAYS }} days.
+      Delete Elastic Agent default indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
      kind: regex
-      value: '^(logstash-azure.*|so-azure.*)$'
+      value: '^(.ds-logs-elastic_agent-default.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-elastic_agent-filebeat-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent Filebeat indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.filebeat-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set DELETE_DAYS = CURATORMERGED['logs-elastic_agent-filebeat-default'].delete %}
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete Elastic Agent Filebeat indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.filebeat-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set cur_close_days = CURATORMERGED['so-checkpoint'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-elastic_agent-fleet_server-default'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close checkpoint indices older than {{cur_close_days}} days.
+      Close Elastic Agent Fleet Server indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
     - filtertype: pattern
      kind: regex
-      value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
+      value: '^(.ds-logs-elastic_agent.fleet_server-default.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -3,20 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set DELETE_DAYS = CURATORMERGED['so-fortinet'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-elastic_agent-fleet_server-default'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete fortinet indices when older than {{ DELETE_DAYS }} days.
+      Delete Elastic Agent Fleet Server indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
      disable_action: False
     filters:
    - filtertype: pattern
      kind: regex
-      value: '^(logstash-fortinet.*|so-fortinet.*)$'
+      value: '^(.ds-logs-elastic_agent.fleet_server-default.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-elastic_agent-metricbeat-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent Metricbeat indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.metricbeat-default-.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set DELETE_DAYS = CURATORMERGED['logs-elastic_agent-metricbeat-default'].delete %}
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete Elastic Agent Metricbeat indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.metricbeat-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-elastic_agent-osquerybeat-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent Osquerybeat indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.osquerybeat-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:
@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set DELETE_DAYS = CURATORMERGED['logs-elastic_agent-osquerybeat-default'].delete %}
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete Elastic Agent Osquerybeat indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-elastic_agent.osquerybeat-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set DELETE_DAYS = CURATORMERGED['so-sophos'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-import-so'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete sophos indices when older than {{ DELETE_DAYS }} days.
+      Delete import indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
      disable_action: False
     filters:
    - filtertype: pattern
      kind: regex
-      value: '^(logstash-sophos.*|so-sophos.*)$'
+      value: '^(.ds-logs-import-so.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set cur_close_days = CURATORMERGED['so-azure'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-import-so'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close azure indices older than {{cur_close_days}} days.
+      Close import indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
    - filtertype: pattern
      kind: regex
-      value: '^(logstash-azure.*|so-azure.*)$'
+      value: '^(.ds-logs-import-so.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set DELETE_DAYS = CURATORMERGED['so-tomcat'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-import-so'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete tomcat indices when older than {{ DELETE_DAYS }} days.
+      Delete import indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
      disable_action: False
     filters:
    - filtertype: pattern
      kind: regex
-      value: '^(logstash-tomcat.*|so-tomcat.*)$'
+      value: '^(.ds-logs-import-so.*)$'
    - filtertype: age
      source: name
      direction: older
@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{%- set cur_close_days = CURATORMERGED['so-cisco'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-strelka-so'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close cisco indices older than {{cur_close_days}} days.
+      Close Strelka indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
    - filtertype: pattern
      kind: regex
-      value: '^(logstash-cisco.*|so-cisco.*)$'
+      value: '^(.ds-logs-strelka-so.*)$'
    - filtertype: age
      source: name
      direction: older

@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set DELETE_DAYS = CURATORMERGED['so-cylance'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-strelka-so'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete cylance indices when older than {{ DELETE_DAYS }} days.
+      Delete Strelka indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-cylance.*|so-cylance.*)$'
+      value: '^(.ds-logs-strelka-so.*)$'
     - filtertype: age
       source: name
       direction: older
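
Once Salt renders the Jinja, Curator consumes plain YAML. Below is a minimal sketch of the rendered Strelka delete action, assuming CURATORMERGED['logs-strelka-so'].delete resolves to 365; the age-filter fields mirror the new action files added later in this diff:

    actions:
      1:
        action: delete_indices
        description: >-
          Delete Strelka indices when older than 365 days.
        options:
          ignore_empty_list: True
          disable_action: False
        filters:
        # first narrow to Strelka data stream backing indices by name...
        - filtertype: pattern
          kind: regex
          value: '^(.ds-logs-strelka-so.*)$'
        # ...then keep only those whose name-embedded date is older than 365 days
        - filtertype: age
          source: name
          direction: older
          timestring: '%Y.%m.%d'
          unit: days
          unit_count: 365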

@@ -3,13 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-
-{%- set cur_close_days = CURATORMERGED['so-f5'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-suricata-so'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close f5 indices older than {{cur_close_days}} days.
+      Close Suricata indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -18,7 +17,7 @@ actions:
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-f5.*|so-f5.*)$'
+      value: '^(.ds-logs-suricata-so.*)$'
     - filtertype: age
       source: name
       direction: older

@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set DELETE_DAYS = CURATORMERGED['so-cyberark'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-suricata-so'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete cyberark indices when older than {{ DELETE_DAYS }} days.
+      Delete Suricata indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-cyberark.*|so-cyberark.*)$'
+      value: '^(.ds-logs-suricata-so.*)$'
     - filtertype: age
       source: name
       direction: older

@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set cur_close_days = CURATORMERGED['so-aws'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-syslog-so'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close aws indices older than {{cur_close_days}} days.
+      Close syslog indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-aws.*|so-aws.*)$'
+      value: '^(.ds-logs-syslog-so.*)$'
     - filtertype: age
       source: name
       direction: older

@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set DELETE_DAYS = CURATORMERGED['so-cisco'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-syslog-so'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete cisco indices when older than {{ DELETE_DAYS }} days.
+      Delete syslog indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-cisco.*|so-cisco.*)$'
+      value: '^(.ds-logs-syslog-so.*)$'
     - filtertype: age
       source: name
       direction: older

@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-system-application-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent system application indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-system.application-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set DELETE_DAYS = CURATORMERGED['logs-system-application-default'].delete %}
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete Elastic Agent system application indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-system.application-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
+
+
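
The new patterns all anchor on .ds- because Elastic Agent writes into data streams, and Curator matches the hidden backing indices rather than the data stream name itself. The names below are illustrative only (dates and generation numbers are invented) and show the kind of index that '^(.ds-logs-system.application-default.*)$' is meant to catch:

    # Hypothetical backing indices of the logs-system.application-default
    # data stream; the regex above matches these names, not the stream alias.
    example_matches:
      - .ds-logs-system.application-default-2023.07.01-000001
      - .ds-logs-system.application-default-2023.07.15-000002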

@@ -3,12 +3,12 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set cur_close_days = CURATORMERGED['so-bluecoat'].close %}
+{%- set cur_close_days = CURATORMERGED['logs-system-auth-default'].close %}
 actions:
   1:
     action: close
     description: >-
-      Close bluecoat indices older than {{cur_close_days}} days.
+      Close Elastic Agent system auth indices older than {{cur_close_days}} days.
     options:
       delete_aliases: False
       timeout_override:
@@ -17,7 +17,7 @@ actions:
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
+      value: '^(.ds-logs-system.auth-default.*)$'
     - filtertype: age
       source: name
       direction: older

@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set DELETE_DAYS = CURATORMERGED['so-barracuda'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-system-auth-default'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete barracuda indices when older than {{ DELETE_DAYS }} days.
+      Delete Elastic Agent system auth indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-barracuda.*|so-barracuda.*)$'
+      value: '^(.ds-logs-system.auth-default.*)$'
    - filtertype: age
       source: name
       direction: older

@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-system-security-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent system security indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-system.security-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set DELETE_DAYS = CURATORMERGED['logs-system-security-default'].delete %}
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete Elastic Agent system security indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-system.security-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
+
+

@@ -0,0 +1,27 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{%- set cur_close_days = CURATORMERGED['logs-system-syslog-default'].close %}
+actions:
+  1:
+    action: close
+    description: >-
+      Close Elastic Agent system syslog indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(.ds-logs-system.syslog-default.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

@@ -3,19 +3,19 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{%- set DELETE_DAYS = CURATORMERGED['so-bluecoat'].delete %}
+{%- set DELETE_DAYS = CURATORMERGED['logs-system-syslog-default'].delete %}
 actions:
   1:
     action: delete_indices
     description: >-
-      Delete bluecoat indices when older than {{ DELETE_DAYS }} days.
+      Delete Elastic Agent system syslog indices when older than {{ DELETE_DAYS }} days.
     options:
       ignore_empty_list: True
       disable_action: False
     filters:
     - filtertype: pattern
       kind: regex
-      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
+      value: '^(.ds-logs-system.syslog-default.*)$'
     - filtertype: age
       source: name
       direction: older

Some files were not shown because too many files have changed in this diff.