Merge pull request #10193 from Security-Onion-Solutions/2.4/dev

2.4.1
Mike Reeves
2023-04-24 13:29:45 -04:00
committed by GitHub
113 changed files with 1785 additions and 855 deletions

View File

@@ -1,6 +1,6 @@
## Security Onion 2.4
## Security Onion 2.4 Beta 2
Security Onion 2.4 is here!
Security Onion 2.4 Beta 2 is here!
## Screenshots

View File

@@ -1,52 +1 @@
### 2.3.120-20220425 ISO image built on 2022/04/25
### Download and Verify
2.3.120-20220425 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
MD5: C99729E452B064C471BEF04532F28556
SHA1: 60BF07D5347C24568C7B793BFA9792E98479CFBF
SHA256: CD17D0D7CABE21D45FA45E1CF91C5F24EB9608C79FF88480134E5592AFDD696E
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
For example, here are the steps you can use on most Linux distributions to download and verify our Security Onion ISO image.
Download and import the signing key:
```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -O - | gpg --import -
```
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.120-20220425.iso.sig securityonion-2.3.120-20220425.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Mon 25 Apr 2022 08:20:40 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```
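In addition to the GPG check, the downloaded image can be compared against the published checksums above; for example, using the standard coreutils tool (note that sha256sum prints lowercase hex):
```
# Compute the SHA256 of the downloaded ISO and compare it to the published value
sha256sum securityonion-2.3.120-20220425.iso
# Expected: cd17d0d7cabe21d45fa45e1cf91c5f24eb9608c79ff88480134e5592afdd696e
```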
Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.3/installation.html
### An ISO will be available starting in RC1.

View File

@@ -1 +1 @@
2.4.0
2.4.1

View File

@@ -0,0 +1,6 @@
logstash:
pipelines:
fleet:
config:
- so/0012_input_elastic_agent.conf
- so/9806_output_lumberjack_fleet.conf.jinja

View File

@@ -4,6 +4,7 @@ logstash:
- 0.0.0.0:3765:3765
- 0.0.0.0:5044:5044
- 0.0.0.0:5055:5055
- 0.0.0.0:5056:5056
- 0.0.0.0:5644:5644
- 0.0.0.0:6050:6050
- 0.0.0.0:6051:6051

View File

@@ -4,5 +4,5 @@ logstash:
config:
- so/0011_input_endgame.conf
- so/0012_input_elastic_agent.conf
- so/0013_input_lumberjack_fleet.conf
- so/9999_output_redis.conf.jinja

View File

@@ -49,8 +49,8 @@ base:
- kibana.secrets
{% endif %}
- secrets
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- manager.soc_manager
- manager.adv_manager
- idstools.soc_idstools
@@ -74,8 +74,8 @@ base:
'*_sensor':
- healthcheck.sensor
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
@@ -89,7 +89,8 @@ base:
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
- kibana.secrets
{% endif %}
- soc_global
- global.soc_global
- global.adv_global
- kratos.soc_kratos
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
@@ -126,7 +127,8 @@ base:
{% endif %}
- secrets
- healthcheck.standalone
- soc_global
- global.soc_global
- global.adv_global
- idstools.soc_idstools
- idstools.adv_idstools
- kratos.soc_kratos
@@ -149,14 +151,15 @@ base:
'*_heavynode':
- elasticsearch.auth
- soc_global
- global.soc_global
- global.adv_global
- redis.soc_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
'*_idh':
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- idh.soc_idh
- idh.adv_idh
- minions.{{ grains.id }}
@@ -174,8 +177,8 @@ base:
- elasticsearch.auth
{% endif %}
- redis.soc_redis
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
@@ -189,8 +192,8 @@ base:
{% endif %}
- redis.soc_redis
- redis.adv_redis
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
@@ -209,8 +212,8 @@ base:
- manager.soc_manager
- manager.adv_manager
- soc.soc_soc
- soc_global
- adv_global
- global.soc_global
- global.adv_global
- backup.soc_backup
- backup.adv_backup
- kratos.soc_kratos
@@ -224,6 +227,18 @@ base:
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
'*_fleet':
- global.soc_global
- global.adv_global
- backup.soc_backup
- backup.adv_backup
- logstash
- logstash.fleet
- logstash.soc_logstash
- logstash.adv_logstash
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
'*_workstation':
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
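The soc_global/adv_global entries above now resolve to pillar files under a global/ subdirectory; the same move is visible later in this commit, where soc_global.sls is written to /opt/so/saltstack/local/pillar/global/. A quick sanity check on a manager, assuming that local pillar root:
```
# Confirm the relocated global pillar files exist where the new top.sls entries expect them
ls -l /opt/so/saltstack/local/pillar/global/
# soc_global.sls (and adv_global.sls, if defined) should be listed here
```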

View File

@@ -191,6 +191,16 @@
'tcpreplay',
'docker_clean'
],
'so-fleet': [
'ssl',
'telegraf',
'firewall',
'logstash',
'healthcheck',
'schedule',
'elasticfleet',
'docker_clean'
],
'so-receiver': [
'ssl',
'telegraf',

View File

@@ -25,6 +25,7 @@ config_backup_script:
so_config_backup:
cron.present:
- name: /usr/sbin/so-config-backup > /dev/null 2>&1
- identifier: so_config_backup
- user: root
- minute: '1'
- hour: '0'
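Salt's cron.present tracks the entries it manages by the identifier comment it writes into the crontab, so pinning an explicit identifier (as above) keeps the entry stable even if the command string changes. Assuming Salt's usual SALT_CRON_IDENTIFIER tagging, the managed entry can be inspected like this:
```
# Managed cron jobs carry a "# SALT_CRON_IDENTIFIER:<id>" comment on the preceding line
crontab -l -u root | grep -B1 'so-config-backup'
```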

View File

@@ -18,7 +18,7 @@ include:
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- bits: 4096
- keysize: 4096
- passphrase:
- cipher: aes_256_cbc
- backup: True
@@ -39,7 +39,7 @@ pki_public_ca_crt:
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- authorityKeyIdentifier: keyid:always, issuer
- days_valid: 3650
- days_remaining: 0
- backup: True
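The authorityKeyIdentifier change above swaps keyid,issuer:always for keyid:always, issuer. The resulting extension can be inspected on the generated CA certificate with openssl; the certificate path here is an assumption inferred from the /etc/pki/ca.key path in this state:
```
# Show the Authority Key Identifier extension on the CA certificate (path assumed)
openssl x509 -in /etc/pki/ca.crt -noout -text | grep -A 2 'Authority Key Identifier'
```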

View File

@@ -97,6 +97,8 @@ alwaysupdated:
Etc/UTC:
timezone.system
# Sync curl configuration for Elasticsearch authentication
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
elastic_curl_config:
file.managed:
- name: /opt/so/conf/elasticsearch/curl.config
@@ -108,6 +110,7 @@ elastic_curl_config:
- require:
- file: elastic_curl_config_distributed
{% endif %}
{% endif %}
# Sync some Utilities
utilsyncscripts:
@@ -133,8 +136,10 @@ so-status_script:
{% if GLOBALS.role in GLOBALS.sensor_roles %}
# Add sensor cleanup
/usr/sbin/so-sensor-clean:
so-sensor-clean:
cron.present:
- name: /usr/sbin/so-sensor-clean
- identifier: so-sensor-clean
- user: root
- minute: '*'
- hour: '*'
@@ -154,8 +159,10 @@ sensorrotateconf:
- source: salt://common/files/sensor-rotate.conf
- mode: 644
/usr/local/bin/sensor-rotate:
sensor-rotate:
cron.present:
- name: /usr/local/bin/sensor-rotate
- identifier: sensor-rotate
- user: root
- minute: '1'
- hour: '0'
@@ -178,8 +185,10 @@ commonlogrotateconf:
- template: jinja
- mode: 644
/usr/local/bin/common-rotate:
common-rotate:
cron.present:
- name: /usr/local/bin/common-rotate
- identifier: common-rotate
- user: root
- minute: '1'
- hour: '0'
@@ -200,17 +209,11 @@ sostatus_log:
- name: /opt/so/log/sostatus/status.log
- mode: 644
common_pip_dependencies:
pip.installed:
- user: root
- pkgs:
- rich
- target: /usr/lib64/python3.6/site-packages
# Install sostatus check cron
sostatus_check_cron:
# Install sostatus check cron. This is used to populate Grid.
so-status_check_cron:
cron.present:
- name: '/usr/sbin/so-status -j > /opt/so/log/sostatus/status.log 2>&1'
- identifier: so-status_check_cron
- user: root
- minute: '*/1'
- hour: '*'
@@ -220,7 +223,7 @@ sostatus_check_cron:
remove_post_setup_cron:
cron.absent:
- name: 'salt-call state.highstate'
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
- identifier: post_setup_cron
{% if GLOBALS.role not in ['eval', 'manager', 'managersearch', 'standalone'] %}
@@ -234,7 +237,7 @@ soversionfile:
{% endif %}
{% if GLOBALS.so_model %}
{% if GLOBALS.so_model and GLOBALS.so_model not in ['SO2AMI01', 'SO2AZI01', 'SO2GCI01'] %}
{% if GLOBALS.os == 'Rocky' %}
# Install Raid tools
raidpkgs:
@@ -246,9 +249,10 @@ raidpkgs:
{% endif %}
# Install raid check cron
so_raid_status:
so-raid-status:
cron.present:
- name: '/usr/sbin/so-raid-status > /dev/null 2>&1'
- identifier: so-raid-status
- user: root
- minute: '*/15'
- hour: '*'

View File

@@ -5,28 +5,37 @@ commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- chrony
- apache2-utils
- wget
- ntpdate
- jq
- python3-docker
- curl
- ca-certificates
- software-properties-common
- apt-transport-https
- openssl
- netcat
- python3-mysqldb
- sqlite3
- libssl-dev
- python3-dateutil
- python3-m2crypto
- python3-mysqldb
- python3-packaging
- python3-watchdog
- python3-lxml
- git
- vim
# since Ubuntu requires an internet connection, we can use pip to install modules
python3-pip:
pkg.installed
python-rich:
pip.installed:
- name: rich
- target: /usr/local/lib/python3.8/dist-packages/
- require:
- pkg: python3-pip
{% elif GLOBALS.os == 'Rocky' %}
commonpkgs:
pkg.installed:
@@ -51,6 +60,8 @@ commonpkgs:
- python3-m2crypto
- rsync
- python3-rich
- python3-pyyaml
- python3-watchdog
- python3-packaging
- unzip
{% endif %}

View File

@@ -61,7 +61,7 @@ if [ -f "$pillar_file" ]; then
reboot;
else
echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/logs/salt/minion."
echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/log/salt/minion."
fi
else # workstation is already added
echo "The workstation pillar already exists in $pillar_file."

View File

@@ -54,6 +54,8 @@ add_interface_bond0() {
ethtool -K "$BNIC" $i off &>/dev/null
fi
done
if ! [[ $is_cloud ]]; then
# Check if the bond slave connection has already been created
nmcli -f name,uuid -p con | grep -q "bond0-slave-$BNIC"
local found_int=$?
@@ -71,16 +73,18 @@ add_interface_bond0() {
ethernet.mtu "$MTU" \
connection.autoconnect "yes"
fi
fi
ip link set dev "$BNIC" arp off multicast off allmulticast off promisc on
if ! [[ $is_cloud ]]; then
# Bring the slave interface up
if [[ $verbose == true ]]; then
nmcli con up "bond0-slave-$BNIC"
else
nmcli con up "bond0-slave-$BNIC" &>/dev/null
fi
fi
if [ "$nic_error" != 0 ]; then
return "$nic_error"
fi
@@ -156,6 +160,39 @@ disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
elastic_fleet_integration_create() {
JSON_STRING=$1
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
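Because the payload is passed straight through to curl -d, this helper accepts either an inline JSON string or curl's @file form to read the body from disk; the policy-load script later in this commit uses the latter. A hypothetical invocation (the file name below is a placeholder):
```
# Inline JSON payload
elastic_fleet_integration_create '{"policy_id":"so-grid-nodes","name":"example"}'
# Or read the payload from a file via curl's @ syntax
elastic_fleet_integration_create "@/opt/so/saltstack/default/salt/elasticfleet/files/integrations/grid-nodes/example.json"
```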
elastic_fleet_policy_create() {
NAME=$1
DESC=$2
FLEETSERVER=$3
JSON_STRING=$( jq -n \
--arg NAME "$NAME" \
--arg DESC "$DESC" \
--arg FLEETSERVER "$FLEETSERVER" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":$FLEETSERVER}'
)
# Create Fleet Policy
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
elastic_fleet_policy_update() {
POLICYID=$1
JSON_STRING=$2
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
elastic_license() {
read -r -d '' message <<- EOM
@@ -201,7 +238,7 @@ gpg_rpm_import() {
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/rocky/keys"
fi
RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALTSTACK-GPG-KEY2.pub' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
for RPMKEY in "${RPMKEYS[@]}"; do
rpm --import $RPMKEYSLOC/$RPMKEY
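Once gpg_rpm_import has run, the imported keys can be confirmed from the RPM database with a standard query:
```
# List imported GPG public keys and their summaries
rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n'
```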

View File

@@ -12,22 +12,22 @@
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints")) | .api_key')
FLEETHOST=$(lookup_pillar "server:url" "elasticfleet")
#FLEETHOST=$(lookup_pillar "server:url" "elasticfleet")
FLEETHOST="{{ GLOBALS.manager_ip }}"
#FLEETHOST=$1
#ENROLLMENTOKEN=$2
CONTAINERGOOS=( "linux" "darwin" "windows" )
rm -rf /tmp/elastic-agent-workspace
mkdir -p /tmp/elastic-agent-workspace
#rm -rf /tmp/elastic-agent-workspace
#mkdir -p /tmp/elastic-agent-workspace
for OS in "${CONTAINERGOOS[@]}"
do
printf "\n\nGenerating $OS Installer..."
cp /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
#cp /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
--mount type=bind,source=/tmp/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ \
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
printf "\n $OS Installer Generated..."

View File

@@ -21,15 +21,33 @@ Security Onion Elastic Clear
-y Skip interactive mode
EOF
}
while getopts "h:y" OPTION
while getopts "h:cdely" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
c)
DELETE_CASES_DATA=1
SKIP=1
;;
d)
DONT_STOP_SERVICES=1
SKIP=1
;;
e)
DELETE_ELASTALERT_DATA=1
SKIP=1
;;
l)
DELETE_LOG_DATA=1
SKIP=1
;;
y)
DELETE_CASES_DATA=1
DELETE_ELASTALERT_DATA=1
DELETE_LOG_DATA=1
SKIP=1
;;
*)
@@ -54,41 +72,83 @@ if [ $SKIP -ne 1 ]; then
if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
fi
# Check to see if Logstash is running
if [ -z "$DONT_STOP_SERVICES" ]; then
# Stop Elastic Agent
for i in $(pgrep elastic-agent | grep -v grep); do
kill -9 $i;
done
# Check to see if Elastic Fleet, Logstash, Elastalert are running
#EF_ENABLED=$(so-status | grep elastic-fleet)
LS_ENABLED=$(so-status | grep logstash)
EA_ENABLED=$(so-status | grep elastalert)
#if [ ! -z "$EF_ENABLED" ]; then
# /usr/sbin/so-elastic-fleet-stop
#fi
if [ ! -z "$LS_ENABLED" ]; then
/usr/sbin/so-logstash-stop
fi
if [ ! -z "$EA_ENABLED" ]; then
/usr/sbin/so-elastalert-stop
fi
fi
# Delete data
echo "Deleting data..."
INDXS=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
if [ ! -z "$DELETE_CASES_DATA" ]; then
# Delete Cases data
echo "Deleting Cases data..."
INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "so-case")
for INDX in ${INDXS}
do
curl -K /opt/so/conf/elasticsearch/curl.config -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
echo "Deleting $INDX"
/usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
done
fi
# Delete Elastalert data
if [ ! -z "$DELETE_ELASTALERT_DATA" ]; then
# Delete Elastalert data
echo "Deleting Elastalert data..."
INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "elastalert")
for INDX in ${INDXS}
do
echo "Deleting $INDX"
/usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
done
fi
# Delete log data
if [ ! -z "$DELETE_LOG_DATA" ]; then
echo "Deleting log data ..."
DATASTREAMS=$(/usr/sbin/so-elasticsearch-query _data_stream | jq -r '.[] |.[].name')
for DATASTREAM in ${DATASTREAMS}
do
# Delete the data stream
echo "Deleting $DATASTREAM..."
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE > /dev/null 2>&1
done
fi
if [ -z "$DONT_STOP_SERVICES" ]; then
#Start Logstash
if [ ! -z "$LS_ENABLED" ]; then
/usr/sbin/so-logstash-start
fi
#Start Elastic Fleet
#if [ ! -z "$EF_ENABLED" ]; then
# /usr/sbin/so-elastic-fleet-start
#fi
#Start Elastalert
if [ ! -z "$EA_ENABLED" ]; then
/usr/sbin/so-elastalert-start
fi
# Start Elastic Agent
/usr/bin/elastic-agent restart
fi
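With the expanded getopts handling above, data can now be cleared selectively instead of all-or-nothing; for example, assuming the script is installed as so-elastic-clear:
```
# Delete only Elastalert and log data, keep case data, and leave services running
so-elastic-clear -e -l -d
# Non-interactively delete everything (cases, Elastalert, and log data)
so-elastic-clear -y
```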

View File

@@ -6,132 +6,16 @@
. /usr/sbin/so-common
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', '') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) %}
# Initial Endpoints
for INTEGRATION in /opt/so/saltstack/default/salt/elasticfleet/files/integrations/endpoints-initial/*.json
do
printf "\n\nInitial Endpoint Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_create "@$INTEGRATION"
done
wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
# Let's snag a cookie from Kibana
SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
# Disable certain Features from showing up in the Kibana UI
echo
echo "Disable certain Features from showing up in the Kibana UI"
so-kibana-space-defaults
echo
# Suricata logs
echo
echo "Setting up Suricata package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "suricata-logs", "name": "suricata-logs", "description": "Suricata integration", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/suricata/eve*.json" ], "data_stream.dataset": "suricata", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata", "custom": "pipeline: suricata.common" }}}}}}'
echo
# Zeek logs
echo
echo "Setting up Zeek package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "zeek-logs", "description": "Zeek logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/zeek/logs/current/*.log"], "data_stream.dataset": "zeek", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
echo
# Import - EVTX
echo
echo "Setting up EVTX import package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "package": { "name": "log", "version": "1.1.0" }, "name": "import-evtx-logs", "namespace": "so", "description": "Import Windows EVTX logs", "policy_id": "so-grid-nodes", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/evtx/data.json" ], "data_stream.dataset": "import", "custom": "pipeline: import.wel", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: windows_eventlog\n imported: true", "tags": [] } } } } } }'
echo
# Import - Suricata logs
echo
echo "Setting up Suricata import package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "import-suricata-logs", "name": "import-suricata-logs", "description": "Import Suricata logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/suricata/eve*.json"], "data_stream.dataset": "import", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"", "custom": "pipeline: suricata.common" } } } } } }'
echo
# Import - Zeek logs
echo
echo "Setting up Zeek import package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "import-zeek-logs", "description": "Zeek Import logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/zeek/logs/*.log"], "data_stream.dataset": "import", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
echo
# Strelka logs
echo
echo "Setting up Strelka package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "strelka-logs", "name": "strelka-logs", "description": "Strelka logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/strelka/log/strelka.log" ], "data_stream.dataset": "strelka", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka", "custom": "pipeline: strelka.file" }}}}}}'
echo
# Syslog TCP Port 514
echo
echo "Setting up Syslog TCP package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "tcp", "version": "1.5.0" }, "id": "syslog-tcp-514", "name": "syslog-tcp-514", "description": "Syslog Over TCP Port 514", "namespace": "so", "inputs": { "tcp-tcp": { "enabled": true, "streams": { "tcp.generic": { "enabled": true, "vars": { "listen_address": "0.0.0.0", "listen_port": "514", "data_stream.dataset": "syslog", "pipeline": "syslog", "processors": "- add_fields:\n target: event\n fields:\n module: syslog", "tags": [ "syslog" ], "syslog_options": "field: message\n#format: auto\n#timezone: Local" } } } } } }'
echo
# Syslog UDP Port 514
echo
echo "Setting up Syslog UDP package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "udp", "version": "1.5.0" }, "id": "syslog-udp-514", "name": "syslog-udp-514", "description": "Syslog over UDP Port 514", "namespace": "so", "inputs": { "udp-udp": { "enabled": true, "streams": { "udp.generic": { "enabled": true, "vars": { "listen_address": "0.0.0.0", "listen_port": "514", "data_stream.dataset": "syslog", "pipeline": "syslog", "max_message_size": "10KiB", "keep_null": false, "processors": "- add_fields:\n target: event\n fields: \n module: syslog\n", "tags": [ "syslog" ], "syslog_options": "field: message\n#format: auto\n#timezone: Local" } } } } } }'
echo
# Kratos logs
echo
echo "Setting up Kratos package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "kratos-logs", "name": "kratos-logs", "description": "Kratos logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kratos/kratos.log" ], "data_stream.dataset": "kratos", "tags": [],"custom":"pipeline: kratos","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos" }}}}}}'
echo
# RITA Logs
#echo
#echo "Setting up RITA package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "rita-logs", "name": "rita-logs", "description": "RITA Beacon logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/rita/beacons.csv", "/nsm/rita/long-connections.csv", "/nsm/rita/short-connections.csv", "/nsm/rita/exploded-dns.csv" ], "data_stream.dataset": "rita", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: rita\n- if:\n log.file.path: beacons.csv\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.beacon\n- if:\n regexp:\n log.file.path: \"*connections.csv\"\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.connection\n- if:\n log.file.path: \"exploded-dns.csv\"\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.dns" }}}}}}'
#echo
# Elasticsearch logs
echo
echo "Setting up Elasticsearch package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "elasticsearch", "version": "1.0.0" }, "id": "elasticsearch-logs", "name": "elasticsearch-logs", "description": "Elasticsearch Logs", "namespace": "default", "inputs": { "elasticsearch-logfile": { "enabled": true, "streams": { "elasticsearch.audit": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_audit.json" ] } }, "elasticsearch.deprecation": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_deprecation.json" ] } }, "elasticsearch.gc": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/gc.log.[0-9]*", "/var/log/elasticsearch/gc.log" ] } }, "elasticsearch.server": { "enabled": true, "vars": { "paths": [ "/opt/so/log/elasticsearch/*.log" ] } }, "elasticsearch.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_index_search_slowlog.json", "/var/log/elasticsearch/*_index_indexing_slowlog.json" ] } } } }, "elasticsearch-elasticsearch/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9200" ], "scope": "node" }, "streams": { "elasticsearch.stack_monitoring.ccr": { "enabled": false }, "elasticsearch.stack_monitoring.cluster_stats": { "enabled": false }, "elasticsearch.stack_monitoring.enrich": { "enabled": false }, "elasticsearch.stack_monitoring.index": { "enabled": false }, "elasticsearch.stack_monitoring.index_recovery": { "enabled": false, "vars": { "active.only": true } }, "elasticsearch.stack_monitoring.index_summary": { "enabled": false }, "elasticsearch.stack_monitoring.ml_job": { "enabled": false }, "elasticsearch.stack_monitoring.node": { "enabled": false }, "elasticsearch.stack_monitoring.node_stats": { "enabled": false }, "elasticsearch.stack_monitoring.pending_tasks": { "enabled": false }, "elasticsearch.stack_monitoring.shard": { "enabled": false } } } } }'
echo
# Logstash logs
#echo
#echo "Setting up Logstash package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "logstash", "version": "2.0.0" }, "id": "logstash-logs", "name": "logstash-logs", "description": "Logstash logs", "namespace": "default", "inputs": { "logstash-logfile": { "enabled": true, "streams": { "logstash.log": { "enabled": true, "vars": { "paths": [ "/opt/so/logs/logstash/logstash.log" ] } }, "logstash.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/logstash/logstash-slowlog-plain*.log", "/var/log/logstash/logstash-slowlog-json*.log" ] } } } }, "logstash-logstash/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9600" ], "period": "10s" }, "streams": { "logstash.stack_monitoring.node": { "enabled": false }, "logstash.stack_monitoring.node_stats": { "enabled": false } } } } }'
#echo
# Kibana logs
#echo
#echo "Setting up Kibana package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "kibana", "version": "2.0.0" }, "id": "kibana-logs", "name": "kibana-logs", "description": "Kibana logs", "namespace": "default", "inputs": { "kibana-logfile": { "enabled": true, "streams": { "kibana.audit": { "enabled": false, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } }, "kibana.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } } } }, "kibana-kibana/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:5601" ] }, "streams": { "kibana.stack_monitoring.cluster_actions": { "enabled": false }, "kibana.stack_monitoring.cluster_rules": { "enabled": false }, "kibana.stack_monitoring.node_actions": { "enabled": false }, "kibana.stack_monitoring.node_rules": { "enabled": false }, "kibana.stack_monitoring.stats": { "enabled": false }, "kibana.stack_monitoring.status": { "enabled": false } } } } }'
#echo
# Redis logs
echo
echo "Setting up Redis package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "redis", "version": "1.4.0" }, "id": "redis-logs", "name": "redis-logs", "description": "Redis logs", "namespace": "default", "inputs": { "redis-logfile": { "enabled": true, "streams": { "redis.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/redis/redis.log" ], "tags": [ "redis-log" ], "preserve_original_event": false } } } }, "redis-redis": { "enabled": false, "streams": { "redis.slowlog": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "password": "" } } } }, "redis-redis/metrics": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "idle_timeout": "20s", "maxconn": 10, "network": "tcp", "password": "" }, "streams": { "redis.info": { "enabled": false, "vars": { "period": "10s" } }, "redis.key": { "enabled": false, "vars": { "key.patterns": "- limit: 20\n pattern: '*'\n", "period": "10s" } }, "redis.keyspace": { "enabled": false, "vars": { "period": "10s" } } } } } }'
echo
# IDH logs
echo
echo "Setting up IDH package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"policy_id":"so-grid-nodes","package":{"name":"log","version":"1.1.1"},"id":"idh-logs","name":"idh-logs","namespace":"so","description":"IDH integration","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/nsm/idh/opencanary.log"],"data_stream.dataset":"idh","custom":"pipeline: common","processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '\''[\"prospector\", \"input\", \"offset\", \"beat\"]'\''\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary","tags":[]}}}}}}'
echo
# SOC - Server logs
echo
echo "Setting up SOC - Server Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-server-logs","namespace":"so","description":"Security Onion Console Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sensoroni-server.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
echo
# SOC - Sensoroni logs
echo
echo "Setting up SOC - Sensoroni Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-sensoroni-logs","namespace":"so","description":"Security Onion - Sensoroni - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/sensoroni/sensoroni.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
echo
# SOC - Elastic Auth Sync logs
echo
echo "Setting up SOC - Elastic Auth Sync Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-auth-sync-logs","namespace":"so","description":"Security Onion - Elastic Auth Sync - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sync.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync","tags":[]}}}}}}'
echo
# SOC - Salt Relay logs
echo
echo "Setting up SOC - Salt_Relay Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-salt-relay-logs","namespace":"so","description":"Security Onion - Salt Relay - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/salt-relay.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay","tags":[]}}}}}}'
echo
# Grid Nodes
for INTEGRATION in /opt/so/saltstack/default/salt/elasticfleet/files/integrations/grid-nodes/*.json
do
printf "\n\nGrid Nodes Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_create "@$INTEGRATION"
done

View File

@@ -9,4 +9,4 @@
. /usr/sbin/so-common
/usr/sbin/so-restart elastic-fleet $1
/usr/sbin/so-restart elasticfleet $1

View File

@@ -8,68 +8,63 @@
. /usr/sbin/so-common
# Create ES Token
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
printf "ESTOKEN = $ESTOKEN \n"
# Add SO-Manager Fleet URL
## This array replaces whatever URLs are currently configured
printf "\n"
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/settings" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"fleet_server_hosts":["https://{{ GLOBALS.manager_ip }}:8220"]}'
printf "\n\n"
# Configure certificates
mkdir -p /opt/so/conf/elastic-fleet/certs
cp /etc/ssl/certs/intca.crt /opt/so/conf/elastic-fleet/certs
cp /etc/pki/elasticfleet* /opt/so/conf/elastic-fleet/certs
{% if grains.role in ['so-import', 'so-standalone', 'so-eval', 'so-manager', 'so-managersearch'] %}
# Add SO-Manager Elasticsearch Output
ESCACRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
### Create Outputs & Fleet URLs ###
printf "\nAdd Manager Elasticsearch Ouput...\n"
ESCACRT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$( jq -n \
--arg ESCACRT "$ESCACRT" \
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"
{% else %}
# Create Logstash Output payload
LOGSTASHCRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/elasticfleet.crt)
LOGSTASHKEY=$(openssl rsa -in /opt/so/conf/elastic-fleet/certs/elasticfleet.key)
LOGSTASHCA=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
printf "\nCreate Logstash Output if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$( jq -n \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"so-manager_logstash","id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}'
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
)
# Add SO-Manager Logstash Output
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"
{%- endif %}
# Add Elastic Fleet Integrations
printf "\nAdd SO-Manager Fleet URL\n"
## This array replaces whatever URLs are currently configured
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/settings" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"fleet_server_hosts":["https://{{ GLOBALS.manager_ip }}:8220", "https://{{ GLOBALS.manager }}:8220"]}'
printf "\n\n"
# Add Elastic Fleet Server Agent Policy
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
#-X POST "localhost:5601/api/fleet/agent_policies" \
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
#-d '{"name":"SO-Manager","id":"so-manager","description":"SO Manager Fleet Server Policy","namespace":"default","monitoring_enabled":["logs"],"has_fleet_server":true}'
# Add Agent Policy - SOS Grid Nodes
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
#-X POST "localhost:5601/api/fleet/agent_policies" \
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
#-d '{"name":"SO-Grid","id":"so-grid","description":"SO Grid Endpoint Policy","namespace":"default","monitoring_enabled":["logs"]}'
### Create Policies & Associated Integration Configuration ###
# Add Agent Policy - Default endpoints
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
#-X POST "localhost:5601/api/fleet/agent_policies" \
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
#-d '{"name":"Endpoints-Initalization","id":"endpoints","description":"Initial Endpoint Policy","namespace":"default","monitoring_enabled":["logs"]}'
# Manager Fleet Server Host
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" | jq
#Temp Fixup for ES Output bug
JSON_STRING=$( jq -n \
--arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
'{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false"
# Grid Nodes Policy
elastic_fleet_policy_create "so-grid-nodes" "SO Grid Node Policy" "false"
# Load Integrations for default policies
so-elastic-fleet-integration-policy-load
### Finalization ###
# Query for Enrollment Tokens for default policies
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-default")) | .api_key')
GRIDNODESENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes")) | .api_key')
@@ -81,11 +76,10 @@ printf '%s\n'\
" es_token: '$ESTOKEN'"\
" endpoints_enrollment: '$ENDPOINTSENROLLMENTOKEN'"\
" grid_enrollment: '$GRIDNODESENROLLMENTOKEN'"\
" url: '{{ GLOBALS.manager_ip }}'"\
"" >> "$pillar_file"
#Store Grid Nodes Enrollment token in Global pillar
global_pillar_file=/opt/so/saltstack/local/pillar/soc_global.sls
global_pillar_file=/opt/so/saltstack/local/pillar/global/soc_global.sls
printf '%s\n'\
" fleet_grid_enrollment_token: '$GRIDNODESENROLLMENTOKEN'"\
"" >> "$global_pillar_file"
@@ -93,17 +87,6 @@ printf '%s\n'\
# Call Elastic-Fleet Salt State
salt-call state.apply elasticfleet queue=True
# Load Elastic Fleet integrations
/usr/sbin/so-elastic-fleet-integration-policy-load
# Temp
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-darwin-x86_64.tar.gz
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-linux-x86_64.tar.gz
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-windows-x86_64.tar.gz
#git clone -b 2.4-so-elastic-agent https://github.com/Security-Onion-Solutions/securityonion-image.git
#cd securityonion-image/so-elastic-agent-builder
#docker build -t so-elastic-agent-builder .
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True

View File

@@ -9,4 +9,4 @@
. /usr/sbin/so-common
/usr/sbin/so-start elastic-fleet $1
/usr/sbin/so-start elasticfleet $1

View File

@@ -9,4 +9,4 @@
. /usr/sbin/so-common
/usr/sbin/so-stop elastic-fleet $1
/usr/sbin/so-stop elasticfleet $1

View File

@@ -43,7 +43,7 @@ APPLY=${APPLY,,}
function rolecall() {
THEROLE=$1
THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval heavynodes idh manager managersearch receivers searchnodes sensors standalone strelka_frontend syslog"
THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval fleet heavynodes idh manager managersearch receivers searchnodes sensors standalone strelka_frontend syslog"
for AROLE in $THEROLES; do
if [ "$AROLE" = "$THEROLE" ]; then

View File

@@ -56,8 +56,11 @@ fi
so-firewall --role=sensors --ip="$IP"
so-firewall --apply=true --role=searchnodes --ip="$IP"
;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
'FLEET' | 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
case "$ROLE" in
'FLEET')
so-firewall --apply=true --role=fleet --ip="$IP"
;;
'SENSOR')
so-firewall --apply=true --role=sensors --ip="$IP"
;;
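With the FLEET case added above, a new Fleet node is allowed through the firewall the same way as the other node types; a hypothetical invocation with a placeholder address:
```
# Allow a Fleet node (example IP) and apply the rules immediately
so-firewall --apply=true --role=fleet --ip="192.0.2.10"
```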

View File

@@ -132,6 +132,22 @@ function add_elastic_to_minion() {
" " >> $PILLARFILE
}
# Add Elastic Fleet Server settings to the minion file
function add_fleet_to_minion() {
# Create ES Token for Fleet server (Curl to Kibana API)
# TODO: Add error handling
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
# Write out settings to minion file
printf '%s\n'\
"elasticfleet:"\
" server:"\
" es_token: '$ESTOKEN'"\
" " >> $PILLARFILE
}
# Add IDH Services info to the minion file
function add_idh_to_minion() {
printf '%s\n'\
@@ -202,6 +218,43 @@ function add_sensor_to_minion() {
echo " enabled: True" >> $PILLARFILE
}
function create_fleet_policy() {
JSON_STRING=$( jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":true}'
)
# Create Fleet Server Policy
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
JSON_STRING_UPDATE=$( jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"data_output_id":"so-manager_elasticsearch"}'
)
# Update Fleet Policy - ES Output
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_$LSHOSTNAME" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING_UPDATE"
}
function update_fleet_host_urls() {
# Query for current Fleet Host URLs & append New Fleet Node Hostname & IP
JSON_STRING=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts' | jq --arg HOSTNAME "https://$LSHOSTNAME:8220" --arg IP "https://$MAINIP:8220" -r '.items[].host_urls += [ $HOSTNAME, $IP ] | {"name":"Default","host_urls": .items[].host_urls,"is_default":true,"proxy_id":null}')
# Update Fleet Host URLs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/fleet_server_hosts/fleet-default-fleet-server-host" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function update_logstash_outputs() {
# Query for current Logstash outputs & append New Fleet Node Hostname & IP
JSON_STRING=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' | jq --arg HOSTNAME "$LSHOSTNAME:5055" --arg IP "$MAINIP:5055" -r '.item.hosts += [ $HOSTNAME, $IP ] | {"name":"grid-logstash","type":"logstash","hosts": .item.hosts,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function updateMine() {
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
}
@@ -214,6 +267,14 @@ function createEVAL() {
add_sensor_to_minion
}
function createFLEET() {
add_fleet_to_minion
add_logstash_to_minion
create_fleet_policy
update_fleet_host_urls
update_logstash_outputs
}
function createIDH() {
add_idh_to_minion
}

View File

@@ -170,7 +170,8 @@ def main():
if "-h" in options or "--help" in options or "-?" in options:
showUsage(options, None)
if os.environ["USER"] != "root":
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0":
fail("This program must be run as root")
console = Console()
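The root check above moves from reading $USER to invoking id -u, which reports the effective UID even when the environment variable is unset or stale (as in cron or a stripped environment); for example:
```
# $USER can be missing entirely, while id -u still answers correctly
env -i /usr/bin/id -u   # prints 0 when run as root, with no USER in the environment
```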

View File

@@ -13,74 +13,74 @@ DEFAULT_ROLE=analyst
function usage() {
cat <<USAGE_EOF
Usage: $0 <operation> [supporting parameters]"
Usage: $0 <operation> [supporting parameters]
where <operation> is one of the following:"
where <operation> is one of the following:
list: Lists all user email addresses currently defined in the identity system"
list: Lists all user email addresses currently defined in the identity system
add: Adds a new user to the identity system"
Required parameters: "
--email <email>"
Optional parameters: "
--role <role> (defaults to $DEFAULT_ROLE)"
--firstName <firstName> (defaults to blank)"
--lastName <lastName> (defaults to blank)"
--note <note> (defaults to blank)"
add: Adds a new user to the identity system
Required parameters:
--email <email>
Optional parameters:
--role <role> (defaults to $DEFAULT_ROLE)
--firstName <firstName> (defaults to blank)
--lastName <lastName> (defaults to blank)
--note <note> (defaults to blank)
--skip-sync (defers the Elastic sync until the next scheduled time)
addrole: Grants a role to an existing user"
Required parameters: "
--email <email>"
--role <role>"
Optional parameters: "
addrole: Grants a role to an existing user
Required parameters:
--email <email>
--role <role>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
delrole: Removes a role from an existing user"
Required parameters: "
--email <email>"
--role <role>"
Optional parameters: "
delrole: Removes a role from an existing user
Required parameters:
--email <email>
--role <role>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
password: Updates a user's password and disables MFA"
Required parameters: "
--email <email>"
Optional parameters: "
password: Updates a user's password and disables MFA
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
profile: Updates a user's profile information"
Required parameters: "
--email <email>"
Optional parameters: "
--role <role> (defaults to $DEFAULT_ROLE)"
--firstName <firstName> (defaults to blank)"
--lastName <lastName> (defaults to blank)"
--note <note> (defaults to blank)"
profile: Updates a user's profile information
Required parameters:
--email <email>
Optional parameters:
--role <role> (defaults to $DEFAULT_ROLE)
--firstName <firstName> (defaults to blank)
--lastName <lastName> (defaults to blank)
--note <note> (defaults to blank)
enable: Enables a user"
Required parameters: "
--email <email>"
Optional parameters: "
enable: Enables a user
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
disable: Disables a user"
Required parameters: "
--email <email>"
Optional parameters: "
disable: Disables a user
Required parameters:
--email <email>
Optional parameters:
--skip-sync (defers the Elastic sync until the next scheduled time)
validate: Validates that the given email address and password are acceptable"
Required parameters: "
--email <email>"
validate: Validates that the given email address and password are acceptable
Required parameters:
--email <email>
valemail: Validates that the given email address is acceptable; requires 'email' parameter"
Required parameters: "
--email <email>"
valemail: Validates that the given email address is acceptable; requires 'email' parameter
Required parameters:
--email <email>
valpass: Validates that a password is acceptable"
valpass: Validates that a password is acceptable
Note that the password can be piped into STDIN to avoid prompting for it"
Note that the password can be piped into STDIN to avoid prompting for it
USAGE_EOF
exit 1
}

View File

@@ -16,67 +16,35 @@ overlimit() {
[[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]]
}
closedindices() {
# If we can't query Elasticsearch, then immediately return false.
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1
[ $? -eq 1 ] && return false
# First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'.
# Next, filter out any so-case indices.
# Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices.
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)"
}
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while overlimit && closedindices; do
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
# We iterate through the closed indices
for CLOSED_INDEX in ${CLOSED_INDICES}; do
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
/usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG}
fi
if ! overlimit; then
exit
fi
done
done
# Check to see if Elasticsearch indices are using more disk space than LOG_SIZE_LIMIT
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, we will break out of the loop.
while overlimit; do
# We need to determine the oldest open index.
# First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'.
# Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices.
# Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
# We iterate through the open indices
for OPEN_INDEX in ${OPEN_INDICES}; do
# If we can't query Elasticsearch, then immediately return false.
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status > /dev/null 2>&1
[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
# We iterate through the closed and open indices
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
# To do so, we need to identify to which data stream this index is associated
# We extract the data stream name using the pattern below
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
if [ "$BACKING_INDICES" -gt 1 ]; then
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
# We make sure we are not trying to delete a write index
if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
# This should not be a write index, so we should be allowed to delete it
/usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG}
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
fi
else
# We delete the entire data stream, since there is only one backing index
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE >> ${LOG} 2>&1
fi
if ! overlimit; then
exit
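
The loop above trusts the `_data_stream` API to list backing indices in order, with the current write index last. A minimal sketch for checking a single data stream by hand, assuming a hypothetical name like `logs-system.syslog-default`:

```
# Hypothetical data stream name; substitute a real one from _cat/indices output.
DATASTREAM="logs-system.syslog-default"

# The write index is the last entry in the backing indices array.
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices[-1].index_name'

# A data stream with only one backing index is deleted whole by the script above.
/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length'
```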


@@ -126,9 +126,10 @@ delete_so-curator_so-status.disabled:
- regex: ^so-curator$
{% endif %}
so-curatorclusterclose:
so-curator-cluster-close:
cron.present:
- name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
- identifier: so-curator-cluster-close
- user: root
- minute: '2'
- hour: '*/1'
@@ -136,9 +137,10 @@ so-curatorclusterclose:
- month: '*'
- dayweek: '*'
so-curatorclusterdeletecron:
so-curator-cluster-delete:
cron.present:
- name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1
- identifier: so-curator-cluster-delete
- user: root
- minute: '*/5'
- hour: '*'
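
Renaming these states while pinning an explicit `identifier` keeps Salt from duplicating the entries, since `cron.present` matches on the identifier comment it writes into the crontab. A quick check that both curator jobs landed (a sketch, assuming Salt's usual `SALT_CRON_IDENTIFIER` comment markers):

```
# Show the managed curator cron entries plus the identifier comment above each.
crontab -l -u root | grep -B1 -E 'so-curator-cluster-(close|delete)'
```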


@@ -38,6 +38,7 @@ docker:
- 0.0.0.0:3765:3765
- 0.0.0.0:5044:5044
- 0.0.0.0:5055:5055
- 0.0.0.0:5056:5056
- 0.0.0.0:5644:5644
- 0.0.0.0:6050:6050
- 0.0.0.0:6051:6051


@@ -0,0 +1,20 @@
{
"package": {
"name": "osquery_manager",
"version": "1.6.0"
},
"name": "osquery-endpoints",
"namespace": "default",
"policy_id": "endpoints-initial",
"inputs": {
"osquery_manager-osquery": {
"enabled": true,
"streams": {
"osquery_manager.result": {
"enabled": true,
"vars": {}
}
}
}
}
}
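
Files like this are ordinary Fleet package-policy documents. If one ever needs to be loaded manually, it can be POSTed to Kibana's package-policies endpoint; a hedged sketch, reusing the `curl -K` convention from the Kibana scripts in this repo and a hypothetical local file name:

```
# Register a package policy with Fleet (file name is illustrative only).
curl -K /opt/so/conf/elasticsearch/curl.config \
  -X POST "localhost:5601/api/fleet/package_policies" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d @osquery-endpoints.json
```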


@@ -0,0 +1,76 @@
{
"package": {
"name": "system",
"version": "1.25.2"
},
"name": "system-endpoints",
"namespace": "default",
"policy_id": "endpoints-initial",
"inputs": {
"system-logfile": {
"enabled": true,
"streams": {
"system.auth": {
"enabled": true,
"vars": {
"ignore_older": "72h",
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"preserve_original_event": false,
"tags": [
"system-auth"
]
}
},
"system.syslog": {
"enabled": true,
"vars": {
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [],
"ignore_older": "72h"
}
}
}
},
"system-winlog": {
"enabled": true,
"streams": {
"system.application": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"ignore_older": "72h",
"language": 0,
"tags": []
}
},
"system.security": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"ignore_older": "72h",
"language": 0,
"tags": []
}
},
"system.system": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"ignore_older": "72h",
"language": 0,
"tags": []
}
}
}
},
"system-system/metrics": {
"enabled": false
}
}
}


@@ -0,0 +1,59 @@
{
"package": {
"name": "windows",
"version": "1.19.1"
},
"name": "windows-endpoints",
"namespace": "default",
"policy_id": "endpoints-initial",
"inputs": {
"windows-winlog": {
"enabled": true,
"streams": {
"windows.forwarded": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"ignore_older": "72h",
"language": 0,
"tags": [
"forwarded"
]
}
},
"windows.powershell": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"event_id": "400, 403, 600, 800",
"ignore_older": "72h",
"language": 0,
"tags": []
}
},
"windows.powershell_operational": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"event_id": "4103, 4104, 4105, 4106",
"ignore_older": "72h",
"language": 0,
"tags": []
}
},
"windows.sysmon_operational": {
"enabled": true,
"vars": {
"preserve_original_event": false,
"ignore_older": "72h",
"language": 0,
"tags": []
}
}
}
},
"windows-windows/metrics": {
"enabled": false
}
}
}


@@ -0,0 +1,106 @@
{
"package": {
"name": "elasticsearch",
"version": "1.0.0"
},
"name": "elasticsearch-logs",
"namespace": "default",
"description": "Elasticsearch Logs",
"policy_id": "so-grid-nodes",
"inputs": {
"elasticsearch-logfile": {
"enabled": true,
"streams": {
"elasticsearch.audit": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_audit.json"
]
}
},
"elasticsearch.deprecation": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_deprecation.json"
]
}
},
"elasticsearch.gc": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/gc.log.[0-9]*",
"/var/log/elasticsearch/gc.log"
]
}
},
"elasticsearch.server": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/elasticsearch/*.log"
]
}
},
"elasticsearch.slowlog": {
"enabled": false,
"vars": {
"paths": [
"/var/log/elasticsearch/*_index_search_slowlog.json",
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
]
}
}
}
},
"elasticsearch-elasticsearch/metrics": {
"enabled": false,
"vars": {
"hosts": [
"http://localhost:9200"
],
"scope": "node"
},
"streams": {
"elasticsearch.stack_monitoring.ccr": {
"enabled": false
},
"elasticsearch.stack_monitoring.cluster_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.enrich": {
"enabled": false
},
"elasticsearch.stack_monitoring.index": {
"enabled": false
},
"elasticsearch.stack_monitoring.index_recovery": {
"enabled": false,
"vars": {
"active.only": true
}
},
"elasticsearch.stack_monitoring.index_summary": {
"enabled": false
},
"elasticsearch.stack_monitoring.ml_job": {
"enabled": false
},
"elasticsearch.stack_monitoring.node": {
"enabled": false
},
"elasticsearch.stack_monitoring.node_stats": {
"enabled": false
},
"elasticsearch.stack_monitoring.pending_tasks": {
"enabled": false
},
"elasticsearch.stack_monitoring.shard": {
"enabled": false
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.1"
},
"name": "idh-logs",
"namespace": "so",
"description": "IDH integration",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/idh/opencanary.log"
],
"data_stream.dataset": "idh",
"tags": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"custom": "pipeline: common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/evtx/data.json"
],
"data_stream.dataset": "import",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: windows_eventlog\n imported: true",
"custom": "pipeline: import.wel"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/suricata/eve*.json"
],
"data_stream.dataset": "import",
"tags": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
"custom": "pipeline: suricata.common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "import-zeek-logs",
"namespace": "so",
"description": "Zeek Import logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/zeek/logs/*.log"
],
"data_stream.dataset": "import",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"tags": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
"custom": "pipeline: kratos"
}
}
}
}
}
}


@@ -0,0 +1,20 @@
{
"package": {
"name": "osquery_manager",
"version": "1.6.0"
},
"name": "osquery-grid-nodes",
"namespace": "default",
"policy_id": "so-grid-nodes",
"inputs": {
"osquery_manager-osquery": {
"enabled": true,
"streams": {
"osquery_manager.result": {
"enabled": true,
"vars": {}
}
}
}
}
}


@@ -0,0 +1,76 @@
{
"package": {
"name": "redis",
"version": "1.4.0"
},
"name": "redis-logs",
"namespace": "default",
"description": "Redis logs",
"policy_id": "so-grid-nodes",
"inputs": {
"redis-logfile": {
"enabled": true,
"streams": {
"redis.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/redis/redis.log"
],
"tags": [
"redis-log"
],
"preserve_original_event": false
}
}
}
},
"redis-redis": {
"enabled": false,
"streams": {
"redis.slowlog": {
"enabled": false,
"vars": {
"hosts": [
"127.0.0.1:6379"
],
"password": ""
}
}
}
},
"redis-redis/metrics": {
"enabled": false,
"vars": {
"hosts": [
"127.0.0.1:6379"
],
"idle_timeout": "20s",
"maxconn": 10,
"network": "tcp",
"password": ""
},
"streams": {
"redis.info": {
"enabled": false,
"vars": {
"period": "10s"
}
},
"redis.key": {
"enabled": false,
"vars": {
"key.patterns": "- limit: 20\n pattern: *\n",
"period": "10s"
}
},
"redis.keyspace": {
"enabled": false,
"vars": {
"period": "10s"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.2"
},
"name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sync.log"
],
"data_stream.dataset": "soc",
"tags": [],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"custom": "pipeline: common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.2"
},
"name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/salt-relay.log"
],
"data_stream.dataset": "soc",
"tags": [],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"custom": "pipeline: common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.2"
},
"name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/sensoroni/sensoroni.log"
],
"data_stream.dataset": "soc",
"tags": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.2"
},
"name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sensoroni-server.log"
],
"data_stream.dataset": "soc",
"tags": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "strelka-logs",
"namespace": "so",
"description": "Strelka logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/strelka/log/strelka.log"
],
"data_stream.dataset": "strelka",
"tags": [],
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
"custom": "pipeline: strelka.file"
}
}
}
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/suricata/eve*.json"
],
"data_stream.dataset": "suricata",
"tags": [],
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
"custom": "pipeline: suricata.common"
}
}
}
}
}
}


@@ -0,0 +1,32 @@
{
"package": {
"name": "tcp",
"version": "1.5.0"
},
"name": "syslog-tcp-514",
"namespace": "so",
"description": "Syslog Over TCP Port 514",
"policy_id": "so-grid-nodes",
"inputs": {
"tcp-tcp": {
"enabled": true,
"streams": {
"tcp.generic": {
"enabled": true,
"vars": {
"listen_address": "0.0.0.0",
"listen_port": "514",
"data_stream.dataset": "syslog",
"pipeline": "syslog",
"processors": "- add_fields:\n target: event\n fields:\n module: syslog",
"tags": [
"syslog"
],
"syslog_options": "field: message\n#format: auto\n#timezone: Local",
"ssl": ""
}
}
}
}
}
}


@@ -0,0 +1,33 @@
{
"package": {
"name": "udp",
"version": "1.5.0"
},
"name": "syslog-udp-514",
"namespace": "so",
"description": "Syslog over UDP Port 514",
"policy_id": "so-grid-nodes",
"inputs": {
"udp-udp": {
"enabled": true,
"streams": {
"udp.generic": {
"enabled": true,
"vars": {
"listen_address": "0.0.0.0",
"listen_port": "514",
"data_stream.dataset": "syslog",
"pipeline": "syslog",
"max_message_size": "10KiB",
"keep_null": false,
"processors": "- add_fields:\n target: event\n fields: \n module: syslog\n",
"tags": [
"syslog"
],
"syslog_options": "field: message\n#format: auto\n#timezone: Local"
}
}
}
}
}
}
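
With the TCP and UDP integrations above both listening on port 514, ingestion can be smoke-tested from another host using util-linux `logger`; a sketch, with a placeholder hostname:

```
# Send one test message over UDP and one over TCP to the syslog inputs.
logger --server fleet-node.example.com --udp --port 514 "syslog udp test"
logger --server fleet-node.example.com --tcp --port 514 "syslog tcp test"
```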


@@ -0,0 +1,47 @@
{
"package": {
"name": "system",
"version": "1.25.2"
},
"name": "system-grid-nodes",
"namespace": "default",
"policy_id": "so-grid-nodes",
"inputs": {
"system-logfile": {
"enabled": true,
"streams": {
"system.auth": {
"enabled": true,
"vars": {
"ignore_older": "72h",
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"preserve_original_event": false,
"tags": [
"system-auth"
]
}
},
"system.syslog": {
"enabled": true,
"vars": {
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [],
"ignore_older": "72h"
}
}
}
},
"system-winlog": {
"enabled": false
},
"system-system/metrics": {
"enabled": false
}
}
}


@@ -0,0 +1,29 @@
{
"package": {
"name": "log",
"version": "1.1.0"
},
"name": "zeek-logs",
"namespace": "so",
"description": "Zeek logs",
"policy_id": "so-grid-nodes",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.log": {
"enabled": true,
"vars": {
"paths": [
"/nsm/zeek/logs/current/*.log"
],
"data_stream.dataset": "zeek",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
"custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n"
}
}
}
}
}
}


@@ -8,8 +8,8 @@
# These values are generated during node install and stored in minion pillar
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:server:es_token','') %}
{% set FLEETSERVERPOLICY = salt['pillar.get']('elasticfleet:server:server_policy','so-manager') %}
{% set FLEETURL = salt['pillar.get']('elasticfleet:server:url') %}
#{% set FLEETSERVERPOLICY = salt['pillar.get']('elasticfleet:server:server_policy','so-manager') %}
#{% set FLEETURL = salt['pillar.get']('elasticfleet:server:url') %}
# Add EA Group
elasticsagentgroup:
@@ -45,35 +45,32 @@ so-elastic-fleet:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent:{{ GLOBALS.so_version }}
- name: so-elastic-fleet
- hostname: Fleet-{{ GLOBALS.hostname }}
- hostname: FleetServer-{{ GLOBALS.hostname }}
- detach: True
- user: 947
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
- extra_hosts:
{% if GLOBALS.is_manager %}
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% else %}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- binds:
- /opt/so/conf/elastic-fleet/certs:/etc/pki:ro
- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
- /etc/pki:/etc/pki:ro
#- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
- environment:
- FLEET_SERVER_ENABLE=true
- FLEET_URL=https://{{ FLEETURL }}:8220
- FLEET_URL=https://{{ GLOBALS.node_ip }}:8220
- FLEET_SERVER_ELASTICSEARCH_HOST=https://{{ GLOBALS.manager }}:9200
- FLEET_SERVER_SERVICE_TOKEN={{ SERVICETOKEN }}
- FLEET_SERVER_POLICY_ID={{ FLEETSERVERPOLICY }}
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/intca.crt
- FLEET_SERVER_POLICY_ID=FleetServer_{{ GLOBALS.hostname }}
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt
- FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key
- FLEET_CA=/etc/pki/intca.crt
- FLEET_CA=/etc/pki/tls/certs/intca.crt
{% endif %}
append_so-elastic-fleet_so-status.conf:


@@ -13,8 +13,8 @@
{ "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
{ "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
{ "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
{ "rename": { "field": "message2.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
{ "rename": { "field": "message2.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
{ "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
{ "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
{ "pipeline": { "name": "common" } }
]
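
The renames now read from `message2.dns.grouped.*` rather than `message2.grouped.*`. A change like this can be sanity-checked with the ingest simulate API before real traffic arrives; a sketch, assuming the pipeline id `dns` (a placeholder) and that `so-elasticsearch-query` forwards extra arguments to curl, as its `-XDELETE` usage elsewhere suggests:

```
# Simulate the pipeline against a minimal document carrying grouped DNS answers.
/usr/sbin/so-elasticsearch-query _ingest/pipeline/dns/_simulate -XPOST \
  -H 'Content-Type: application/json' \
  -d '{"docs":[{"_source":{"message2":{"dns":{"grouped":{"A":["192.0.2.1"],"CNAME":["example.com"]}}}}}]}'
```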


@@ -177,6 +177,7 @@ esyml:
ESCONFIG: {{ ESCONFIG }}
- template: jinja
{% if GLOBALS.role != "so-searchnode" %}
escomponenttemplates:
file.recurse:
- name: /opt/so/conf/elasticsearch/templates/component
@@ -219,6 +220,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
- cmd: so-elasticsearch-templates
{% endfor %}
{% endif %}
{% endif %}
esroles:
file.recurse:
@@ -363,6 +365,8 @@ append_so-elasticsearch_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-elasticsearch
{% if GLOBALS.role != "so-searchnode" %}
so-es-cluster-settings:
cmd.run:
- name: /usr/sbin/so-elasticsearch-cluster-settings
@@ -406,7 +410,7 @@ so-elasticsearch-roles-load:
- require:
- docker_container: so-elasticsearch
- file: es_sync_scripts
{% endif %}
{% else %}
{{sls}}_state_not_allowed:


@@ -18,6 +18,7 @@
"indices": [
{
"names": [
"logs-*",
"so-*"
],
"privileges": [


@@ -16,6 +16,7 @@
"indices": [
{
"names": [
"logs-*",
"so-*"
],
"privileges": [


@@ -4,6 +4,7 @@
"indices": [
{
"names": [
"logs-*",
"so-*"
],
"privileges": [


@@ -4,6 +4,7 @@
"indices": [
{
"names": [
"logs-*",
"so-*"
],
"privileges": [


@@ -66,6 +66,41 @@ role:
localhost:
portgroups:
- {{ portgroups.all }}
fleet:
chain:
DOCKER-USER:
hostgroups:
sensors:
portgroups:
- {{ portgroups.elastic_agent_control }}
- {{ portgroups.elastic_agent_data }}
elastic_agent_endpoint:
portgroups:
- {{ portgroups.elastic_agent_control }}
- {{ portgroups.elastic_agent_data }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
standalone:
portgroups:
- {{ portgroups.salt_manager }}
sensors:
portgroups:
- {{ portgroups.salt_manager }}
searchnodes:
portgroups:
- {{ portgroups.salt_manager }}
heavynodes:
portgroups:
- {{ portgroups.salt_manager }}
manager:
chain:
DOCKER-USER:
@@ -272,12 +307,25 @@ role:
- {{ portgroups.yum }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
- {{ portgroups.beats_5056 }}
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.elastic_agent_control }}
- {{ portgroups.elastic_agent_data }}
- {{ portgroups.endgame }}
- {{ portgroups.strelka_frontend }}
fleet:
portgroups:
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.docker_registry }}
- {{ portgroups.influxdb }}
- {{ portgroups.sensoroni }}
- {{ portgroups.yum }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
- {{ portgroups.beats_5056 }}
- {{ portgroups.elastic_agent_control }}
- {{ portgroups.elastic_agent_data }}
sensors:
portgroups:
- {{ portgroups.docker_registry }}
@@ -286,6 +334,7 @@ role:
- {{ portgroups.yum }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
- {{ portgroups.beats_5056 }}
- {{ portgroups.elastic_agent_control }}
- {{ portgroups.elastic_agent_data }}
searchnodes:
@@ -342,6 +391,9 @@ role:
dockernet:
portgroups:
- {{ portgroups.all }}
fleet:
portgroups:
- {{ portgroups.salt_manager }}
localhost:
portgroups:
- {{ portgroups.all }}
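
The new `fleet` role opens the agent control and data ports toward sensors and endpoints in DOCKER-USER and the salt/SSH ports on INPUT. After a highstate, the generated rules can be inspected directly; a minimal sketch:

```
# Confirm the fleet node accepts agent and beats traffic on the expected ports.
iptables -L DOCKER-USER -n --line-numbers | grep -E '5044|5055|5056'
iptables -L INPUT -n --line-numbers | head -n 20
```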


@@ -93,6 +93,12 @@
'so-idh',
] %}
{% elif GLOBALS.role == 'so-fleet' %}
{% set NODE_CONTAINERS = [
'so-elastic-fleet',
'so-logstash',
] %}
{% elif GLOBALS.role == 'so-sensor' %}
{% set NODE_CONTAINERS = [] %}


@@ -17,6 +17,9 @@ firewall:
beats_5066:
tcp:
- 5066
beats_5056:
tcp:
- 5056
docker_registry:
tcp:
- 5000
@@ -32,7 +35,6 @@ firewall:
elastic_agent_data:
tcp:
- 5055
- 9200
endgame:
tcp:
- 3765


@@ -20,9 +20,10 @@ idstoolslogdir:
- group: 939
- makedirs: True
so-ruleupdatecron:
so-rule-update:
cron.present:
- name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
- identifier: so-rule-update
- user: root
- minute: '1'
- hour: '7'


@@ -127,6 +127,7 @@ metrics_link_file:
get_influxdb_size:
cron.present:
- name: 'du -s -k /nsm/influxdb | cut -f1 > /opt/so/log/telegraf/influxdb_size.log 2>&1'
- identifier: get_influxdb_size
- user: root
- minute: '*/1'
- hour: '*'


@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.6.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.7.0" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done


@@ -31,62 +31,3 @@ kibana:
reporting:
kibanaServer:
hostname: localhost
fleet:
packages:
- name: fleet_server
version: latest
- name: log
version: latest
- name: osquery_manager
version: latest
- name: system
version: latest
- name: windows
version: latest
agentPolicies:
- name: SO-Manager
id: so-manager
description: "SO Manager Fleet Server Policy"
namespace: default
is_default_fleet_server: true
monitoring_enabled: ['logs']
package_policies:
- name: fleet-server_manager
package:
name: fleet_server
- name: SO-Grid-Nodes
id: so-grid-nodes
description: "SO Grid Node Policy"
namespace: default
monitoring_enabled: ['logs']
package_policies:
- name: osquery-grid-nodes
package:
name: osquery_manager
- name: system-grid-nodes
package:
name: system
inputs:
- type: system/metrics
enabled: false
- name: Endpoints-Initial
id: endpoints-default
description: "Initial Endpoint Policy"
namespace: default
monitoring_enabled: ['logs']
package_policies:
- name: system-endpoints
package:
name: system
inputs:
- type: system/metrics
enabled: false
- name: osquery-endpoints
package:
name: osquery_manager
- name: windows-endpoints
package:
name: windows
inputs:
- type: windows/metrics
enabled: false


@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.6.2","id": "8.6.2","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.7.0","id": "8.7.0","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

File diff suppressed because one or more lines are too long


@@ -108,12 +108,6 @@ append_so-kibana_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-kibana
osquery_hunt_link:
cmd.script:
- source: salt://kibana/files/live_query_fixup.sh
- cwd: /root
- template: jinja
{% else %}
{{sls}}_state_not_allowed:


@@ -22,7 +22,7 @@
include:
- ssl
{% if GLOBALS.role not in ['so-receiver'] %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
- elasticsearch
{% endif %}
@@ -164,6 +164,10 @@ so-logstash:
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet'] %}
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:/usr/share/logstash/elasticfleet-logstash.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}


@@ -4,7 +4,7 @@
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch'] %}
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
{% for hostname in node_data[node_type].keys() %}
{% do REDIS_NODES.append({hostname:node_details[hostname].ip}) %}


@@ -4,9 +4,14 @@ input {
tags => [ "elastic-agent" ]
ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
ssl_verify_mode => "force_peer"
ecs_compatibility => v8
}
}
filter {
mutate {
rename => {"@metadata" => "metadata"}
}
}


@@ -0,0 +1,13 @@
input {
http {
additional_codecs => { "application/json" => "json_lines" }
port => 5056
tags => [ "elastic-agent" ]
ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
ssl_verify_mode => "peer"
ecs_compatibility => v8
}
}
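
This input terminates TLS on port 5056 and decodes batched JSON. Because `ssl_verify_mode` is `peer` rather than `force_peer`, a client certificate is requested but not required, so the listener can be probed with a plain HTTPS POST; a sketch, assuming the CA path used elsewhere in this changeset and a placeholder hostname:

```
# Post a single JSON event to the 5056 input; a 200 response means it was accepted.
curl --cacert /etc/pki/tls/certs/intca.crt \
  -H 'Content-Type: application/json' \
  -d '{"message":"fleet relay input smoke test"}' \
  https://manager.example.com:5056
```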


@@ -0,0 +1,11 @@
output {
http {
url => 'https://{{ GLOBALS.manager }}:5056'
cacert => ["/usr/share/filebeat/ca.crt"]
http_method => post
retry_non_idempotent => true
format => json_batch
http_compression => true
ecs_compatibility => v8
}
}


@@ -51,7 +51,7 @@ repo_sync_script:
- group: root
- mode: 755
reposync_cron:
so-repo-sync:
{% if MANAGERMERGED.reposync.enabled %}
cron.present:
{% else %}
@@ -59,6 +59,7 @@ reposync_cron:
{% endif %}
- user: socore
- name: '/usr/sbin/so-repo-sync >> /opt/so/log/reposync/reposync.log 2>&1'
- identifier: so-repo-sync
- hour: '{{ MANAGERMERGED.reposync.hour }}'
- minute: '{{ MANAGERMERGED.reposync.minute }}'
@@ -83,10 +84,11 @@ yara_update_script:
ISAIRGAP: {{ GLOBALS.airgap }}
EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }}
strelka_yara_update:
strelka-yara-update:
cron.present:
- user: root
- name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1'
- identifier: strelka-yara-update
- hour: '7'
- minute: '1'


@@ -24,8 +24,9 @@ sync_es_users:
# we don't want this added too early in setup, so we add the onlyif to verify 'startup_states: highstate'
# is in the minion config. That line is added before the final highstate during setup
sosyncusers:
so-user_sync:
cron.present:
- user: root
- name: 'PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin /usr/sbin/so-user sync &>> /opt/so/log/soc/sync.log'
- identifier: so-user_sync
- onlyif: "grep 'startup_states: highstate' /etc/salt/minion"


@@ -2,8 +2,13 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ntp/config.map.jinja' import NTPCONFIG %}
chrony_pkg:
pkg.installed:
- name: chrony
chronyconf:
file.managed:
- name: /etc/chrony.conf
@@ -12,8 +17,14 @@ chronyconf:
- defaults:
NTPCONFIG: {{ NTPCONFIG }}
{% if GLOBALS.os == 'Rocky' %}
chronyd:
{% else %}
chrony:
{% endif %}
service.running:
- enable: True
- watch:
- file: chronyconf
- require:
- pkg: chrony_pkg
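
The service name differs by distro, hence the Rocky conditional. Once the state applies, synchronization can be verified the same way on either platform:

```
# Confirm the chrony service is active (name varies by distro) and synced.
systemctl is-active chronyd 2>/dev/null || systemctl is-active chrony
chronyc tracking | grep -E 'Reference ID|System time'
```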


@@ -113,15 +113,17 @@ append_so-playbook_so-status.conf:
{% endif %}
so-playbooksynccron:
so-playbook-sync_cron:
cron.present:
- name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
- identifier: so-playbook-sync_cron
- user: root
- minute: '*/5'
so-playbookruleupdatecron:
so-playbook-ruleupdate_cron:
cron.present:
- name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
- identifier: so-playbook-ruleupdate_cron
- user: root
- minute: '1'
- hour: '6'


@@ -1,31 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBGLXV/8BCADCuomk2pibSOuLQeKMIwV3Afy60080hykdc4tU4qQS+zBJZZC0
VBl2TAOmMWyeY5DRF2ibRTx6Ap8qYefuEjWlo2WHWWZH4WhNkJWL3aWiu8Ga+fFo
ebjoUFLGgpKDGKveO9PF8A41IP1CLvDicpWXTxfqzQKDOvg3g5EmCx+5ksviXHJ1
lY5CBbhVPmU3ruzGBqN/6B90VyTicbIyIZKZdnElAqaW6OiEaOmj2Oadi3ARJLWA
8rpVPweZE0/S4B5UIuMh+JVJU3Os1BUXHKN3LAPENZa1NNYX3j53GxGMf+SAKe0g
QHe+fHiiB7a6iBl09W8cUJh8HINXW+vvU6mZABEBAAG0MlNhbHRTdGFjayBQYWNr
YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQFSBBMBCAA8FiEE
9+rekz4krjI0B2hWN6cQR50w17YFAmLXV/8CGwMFCwkIBwIDIgIBBhUKCQgLAgQW
AgMBAh4HAheAAAoJEDenEEedMNe2d0MH/36khQzCWMc5ezznO7bcOHOS3OWjQveF
Vv60y54QRnINCEa7w7ckjiap3dUSJxTo5eoAKNbgX5SgrshEY1HDXDoqgumHJLFW
J+L4f3CXFBhvObUOwB7ApUNHURcoNQYK7kS/vUJrQ3dFyT7uvgysGtv+/WpboY1s
ScJnVtWyQmLe7qj5pJ0aI5pPjFnP9869zPScNb6o6lbqGp/xhnL5NkZCF0DNgItw
HXyNsRPyc8JG+P+GP80XWZ37ajEdwkiPbtu3CD5pvBO1w5FPLBwuH5CSgQFEcA4V
QH8ThU0P1IhKe3xPRNgawcBTAHXqOD0OxilAIsQdfrKkRiTEcZtFZW25AQ0EYtdX
/wEIANFBzJfSks4ti/JQkECtEAwH7OtqUxu1QhSSRusGsQu/PpjBRZzlaVlKjS4c
fGTiZ8+25RX063vBQ+XpuTN9T9boEE4EywM11FCx1zRZIc+HlLOIJ10uKWUapmPM
+7flnQWXMgJzP47rHe0ofEHlP4/av5C1imgWEtEpYyn1B4qgSxvLFDq46rD5m+DP
2xNZbwWd0uSAG/wZNonVkISYymB0UTnUm8FABH1Ci7lXO9JnuW+IvVt32C5VibGy
FXdAJGmIiqsvBhJSUl+GJhO6NTXntuevqPLUXD9PuHWo4Vo1Afek8kqZByyiyrTZ
StDhrbo/8dSAVQMibLEfNS7R0QkAEQEAAYkBNgQYAQgAIBYhBPfq3pM+JK4yNAdo
VjenEEedMNe2BQJi11f/AhsMAAoJEDenEEedMNe2zhgH/0wxbQpaCho0BRbUbe6L
jm9r3yTWn6M+yYv+cBeH9sbobIVOqTvZcawzTEPWa+eVbKgkqhZjUTyfFDpjq9s6
67zLZnCh85hLoyieSQBER59dc1pmqZJP3VrAIT1lGKMIdjZoN8JAF8IbmJHE1j65
iZZdhbxfFHnDx22gQ+3nfniTNTWsfVAQeoAjeOuakPKdfUEMsXPBhtBBuFY4NcrT
TIsBevT4J/STCLkEqlMtYC8ldxUCZqQXdtxqltC4k+y0kp4PmNc3/Vmp65oAeuxI
d8TNwgZdamdinv5mPrTfBqSNiELQAcPQnOwpsqEDYF2pq9L4sdNGavP5ZvPGRLkH
+uU=
=383D
-----END PGP PUBLIC KEY BLOCK-----


@@ -1,20 +0,0 @@
# this removes the repo file left by bootstrap-salt.sh without -r
remove_salt.list:
file.absent:
- name: /etc/apt/sources.list.d/salt.list
saltstack.list:
file.managed:
- name: /etc/apt/sources.list.d/saltstack.list
- contents:
- deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt3004.2/ {{grains.oscodename}} main
apt_update:
cmd.run:
- name: apt-get update
- onchanges:
- file: saltstack.list
- timeout: 30
- retry:
attempts: 5
interval: 30


@@ -1,17 +1,9 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.os != 'Rocky' %}
{% if grains.oscodename == 'focal' %}
saltpymodules:
pkg.installed:
- pkgs:
{% if grains['oscodename'] == 'bionic' %}
- python-m2crypto
- python-docker
{% elif grains['oscodename'] == 'focal' %}
- python3-m2crypto
- python3-docker
{% endif %}
{% endif %}
salt_bootstrap:
file.managed:


@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
version: 3006.0+0na.61a7bd9
version: 3006.0rc3


@@ -12,8 +12,9 @@ state-apply-test:
start: 0
end: 180
/usr/sbin/so-salt-minion-check -q:
so-salt-minion-check_cron:
cron.present:
- identifier: so-salt-minion-check
- name: /usr/sbin/so-salt-minion-check -q
- identifier: so-salt-minion-check_cron
- user: root
- minute: '*/5'


@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
version: 3006.0+0na.61a7bd9
version: 3006.0rc3
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.


@@ -1,4 +1,4 @@
schedule:
highstate_schedule:
schedule.present:
- function: state.highstate
- minutes: 15


@@ -1,6 +1,7 @@
post_setup_cron:
cron.present:
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
- identifier: post_setup_cron
- user: root
- minute: '*/1'
- identifier: post_setup_cron


@@ -6,7 +6,8 @@
PIPE_OWNER=${PIPE_OWNER:-socore}
PIPE_GROUP=${PIPE_GROUP:-socore}
SOC_PIPE=${SOC_PIPE_REQUEST:-/opt/so/conf/soc/salt/pipe}
SOC_PIPE=${SOC_PIPE:-/opt/so/conf/soc/salt/pipe}
CMD_PREFIX=${CMD_PREFIX:-""}
PATH=${PATH}:/usr/sbin
function log() {
@@ -26,7 +27,7 @@ function make_pipe() {
make_pipe "${SOC_PIPE}"
function list_minions() {
response=$(so-minion -o=list)
response=$($CMD_PREFIX so-minion -o=list)
exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log "Successful command execution"
@@ -42,7 +43,7 @@ function manage_minion() {
op=$(echo "$request" | jq -r .operation)
id=$(echo "$request" | jq -r .id)
response=$(so-minion "-o=$op" "-m=$id")
response=$($CMD_PREFIX so-minion "-o=$op" "-m=$id")
exit_code=$?
if [[ exit_code -eq 0 ]]; then
log "Successful command execution"
@@ -75,14 +76,14 @@ function manage_user() {
add|enable|disable|delete)
email=$(echo "$request" | jq -r .email)
log "Performing user '$op' for user '$email'"
response=$(so-user "$op" --email "$email" --skip-sync)
response=$($CMD_PREFIX so-user "$op" --email "$email" --skip-sync)
exit_code=$?
;;
addrole|delrole)
email=$(echo "$request" | jq -r .email)
role=$(echo "$request" | jq -r .role)
log "Performing '$op' for user '$email' with role '$role'"
response=$(so-user "$op" --email "$email" --role "$role" --skip-sync)
response=$($CMD_PREFIX so-user "$op" --email "$email" --role "$role" --skip-sync)
exit_code=$?
;;
password)
@@ -98,12 +99,12 @@ function manage_user() {
lastName=$(echo "$request" | jq -r .lastName)
note=$(echo "$request" | jq -r .note)
log "Performing '$op' update for user '$email' with firstname '$firstName', lastname '$lastName', and note '$note'"
response=$(so-user "$op" --email "$email" --firstName "$firstName" --lastName "$lastName" --note "$note")
response=$($CMD_PREFIX so-user "$op" --email "$email" --firstName "$firstName" --lastName "$lastName" --note "$note")
exit_code=$?
;;
sync)
log "Performing '$op'"
response=$(so-user "$op")
response=$($CMD_PREFIX so-user "$op")
exit_code=$?
;;
*)
@@ -142,17 +143,17 @@ function manage_salt() {
state)
log "Performing '$op' for '$state' on minion '$minion'"
state=$(echo "$request" | jq -r .state)
response=$(salt --async "$minion" state.apply "$state" queue=True)
response=$($CMD_PREFIX salt --async "$minion" state.apply "$state" queue=True)
exit_code=$?
;;
highstate)
log "Performing '$op' on minion $minion"
response=$(salt --async "$minion" state.highstate queue=True)
response=$($CMD_PREFIX salt --async "$minion" state.highstate queue=True)
exit_code=$?
;;
activejobs)
response=$($CMD_PREFIX salt-run jobs.active -out json -l quiet)
log "Querying active salt jobs"
response=$(salt-run jobs.active -out json -l quiet)
$(echo "$response" > "${SOC_PIPE}")
return
;;
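
Every external command in the relay now runs behind `$CMD_PREFIX`, which defaults to empty, so production behavior is unchanged while tests can interpose a wrapper. A hedged example of exercising that hook with a hypothetical dry-run prefix:

```
# Echo each salt/so-* command instead of executing it (illustrative wrapper only).
CMD_PREFIX="echo DRY-RUN:" /opt/so/saltstack/default/salt/soc/files/bin/salt-relay.sh
```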


@@ -10,4 +10,4 @@ To see all the latest features and fixes in this version of Security Onion, clic
## Customize This Space
Make this area your own by customizing the content in the [Config](/#/config) interface.
Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface.


@@ -91,6 +91,7 @@ socusersroles:
salt-relay:
cron.present:
- name: 'ps -ef | grep salt-relay.sh | grep -v grep > /dev/null 2>&1 || /opt/so/saltstack/default/salt/soc/files/bin/salt-relay.sh >> /opt/so/log/soc/salt-relay.log 2>&1 &'
- identifier: salt-relay
so-soc:
docker_container.running:


@@ -51,17 +51,13 @@ m2cryptopkgs:
influxdb_key:
x509.private_key_managed:
- name: /etc/pki/influxdb.key
- CN: {{ GLOBALS.hostname }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/influxdb.key') -%}
- prereq:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -72,7 +68,7 @@ influxdb_crt:
- name: /etc/pki/influxdb.crt
- ca_server: {{ ca_server }}
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- private_key: /etc/pki/influxdb.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -101,17 +97,13 @@ influxkeyperms:
redis_key:
x509.private_key_managed:
- name: /etc/pki/redis.key
- CN: {{ GLOBALS.hostname }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/redis.key') -%}
- prereq:
- x509: /etc/pki/redis.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -122,7 +114,7 @@ redis_crt:
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: registry
- public_key: /etc/pki/redis.key
- private_key: /etc/pki/redis.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
@@ -146,21 +138,19 @@ rediskeyperms:
- group: 939
{% endif %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet'] %}
# Create cert for Elastic Fleet Host
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -171,7 +161,7 @@ etc_elasticfleet_crt:
- name: /etc/pki/elasticfleet.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- public_key: /etc/pki/elasticfleet.key
- private_key: /etc/pki/elasticfleet.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -199,50 +189,158 @@ efperms:
- mode: 640
- group: 939
chownilogstashelasticfleetp8:
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet.p8
- name: /etc/pki/elasticfleet.crt
- mode: 640
- user: 947
- group: 939
# Create Symlinks to the keys so I can distribute it to all the things
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet.key
- mode: 640
- user: 947
- group: 939
# Create Symlinks to the keys to distribute it to all the things
elasticfleetdircerts:
file.directory:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs
- makedirs: True
efkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet.p8
- target: /etc/pki/elasticfleet.p8
- user: socore
- group: socore
efcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet.crt
- target: /etc/pki/elasticfleet.crt
- user: socore
- group: socore
{% if grains.role not in ['so-fleet'] %}
# Create Cert for Elastic Fleet Logstash Input (Same cert used across all Fleet nodes)
etc_elasticfleetlogstash_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-logstash.key
- bits: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-logstash.key') -%}
- prereq:
- x509: etc_elasticfleetlogstash_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_elasticfleetlogstash_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-logstash.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-logstash.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
- days_valid: 820
- backup: True
{% if grains.role not in ['so-heavynode'] %}
- unless:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
{% endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleetlogstash_key
eflogstashperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- group: 939
chownilogstashelasticfleetp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.p8
- mode: 640
- user: 947
- group: 939
chownilogstashelasticfleetlogstashcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.crt
- mode: 640
- user: 947
- group: 939
chownilogstashelasticfleetlogstashkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- user: 947
- group: 939
eflogstashkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet-logstash.p8
- target: /etc/pki/elasticfleet-logstash.p8
- user: socore
- group: socore
eflogstashcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet-logstash.crt
- target: /etc/pki/elasticfleet-logstash.crt
- user: socore
- group: socore
{% endif %}
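
The `unless` guard above reissues a certificate once fewer than 5 days (432000 seconds) of validity remain. The same arithmetic works for checking any of these certs by hand; a sketch using the logstash cert as the example:

```
# Print the days of validity left on the fleet logstash certificate.
enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -enddate -noout | cut -d= -f2)" +%s)
now=$(date +%s)
echo "$(( (enddate - now) / 86400 )) days remaining"
```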
/opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:
file.managed:
- replace: True
- source: salt://elasticfleet/files/certs/elasticfleet-logstash.p8
- makedirs: True
- mode: 640
- user: 931
- group: 939
/opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:
file.managed:
- replace: True
- source: salt://elasticfleet/files/certs/elasticfleet-logstash.crt
- makedirs: True
- mode: 640
- user: 931
- group: 939
{% endif %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
x509.private_key_managed:
- name: /etc/pki/filebeat.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
- prereq:
- x509: etc_filebeat_crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -253,7 +351,7 @@ etc_filebeat_crt:
- name: /etc/pki/filebeat.crt
- ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /etc/pki/filebeat.key
- private_key: /etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -313,17 +411,13 @@ fbcrtlink:
registry_key:
x509.private_key_managed:
- name: /etc/pki/registry.key
- CN: {{ GLOBALS.manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/registry.key') -%}
- prereq:
- x509: /etc/pki/registry.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -335,7 +429,7 @@ registry_crt:
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.manager }}, IP:{{ GLOBALS.manager_ip }}
- signing_policy: registry
- public_key: /etc/pki/registry.key
- private_key: /etc/pki/registry.key
- CN: {{ GLOBALS.manager }}
- days_remaining: 0
- days_valid: 820
@@ -361,17 +455,13 @@ regkeyperms:
# Create a cert for elasticsearch
/etc/pki/elasticsearch.key:
x509.private_key_managed:
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -380,7 +470,7 @@ regkeyperms:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/elasticsearch.key
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -418,17 +508,13 @@ elasticp12perms:
managerssl_key:
x509.private_key_managed:
- name: /etc/pki/managerssl.key
- CN: {{ GLOBALS.manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -439,7 +525,7 @@ managerssl_crt:
- name: /etc/pki/managerssl.crt
- ca_server: {{ ca_server }}
- signing_policy: managerssl
- public_key: /etc/pki/managerssl.key
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -476,17 +562,13 @@ fbcertdir:
conf_filebeat_key:
x509.private_key_managed:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ COMMONNAME }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
- prereq:
- x509: conf_filebeat_crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -497,7 +579,7 @@ conf_filebeat_crt:
- name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
- ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- private_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
@@ -542,17 +624,13 @@ chownfilebeatp8:
# Create a cert for elasticsearch
/etc/pki/elasticsearch.key:
x509.private_key_managed:
- CN: {{ GLOBALS.manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry:
attempts: 5
interval: 30
@@ -561,7 +639,7 @@ chownfilebeatp8:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/elasticsearch.key
- private_key: /etc/pki/elasticsearch.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0


@@ -205,11 +205,13 @@ filecheck_restart:
filecheck_run:
cron.present:
- name: 'ps -ef | grep filecheck | grep -v grep > /dev/null 2>&1 || python3 /opt/so/conf/strelka/filecheck >> /opt/so/log/strelka/filecheck_stdout.log 2>&1 &'
- identifier: filecheck_run
- user: {{ filecheck_runas }}
filecheck_history_clean:
cron.present:
- name: '/usr/bin/find /nsm/strelka/history/ -type f -mtime +2 -exec rm {} + > /dev/null 2>&1'
- identifier: filecheck_history_clean
- minute: '33'
# End Filecheck Section


@@ -79,8 +79,10 @@ surilogscript:
- source: salt://suricata/cron/surilogcompress
- mode: 755
/usr/local/bin/surilogcompress:
surilogcompress:
cron.present:
- name: /usr/local/bin/surilogcompress
- identifier: surilogcompress
- user: suricata
- minute: '17'
- hour: '*'
@@ -181,16 +183,6 @@ delete_so-suricata_so-status.disabled:
- regex: ^so-suricata$
{% endif %}
surirotate:
cron.absent:
- name: /usr/local/bin/surirotate
- user: root
- minute: '11'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
so-suricata-eve-clean:
file.managed:
- name: /usr/sbin/so-suricata-eve-clean
@@ -204,6 +196,7 @@ so-suricata-eve-clean:
clean_suricata_eve_files:
cron.present:
- name: /usr/sbin/so-suricata-eve-clean > /dev/null 2>&1
- identifier: clean_suricata_eve_files
- user: root
- minute: '*/5'
- hour: '*'
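
The old `surirotate` job is retired with `cron.absent` while its replacement is installed under an identifier, the usual retire-and-replace pattern for managed crontabs. A trimmed sketch using the values from this hunk (state IDs illustrative, surrounding states elided):

```
surirotate_retired:
  cron.absent:
    - name: /usr/local/bin/surirotate   # matched by name and user
    - user: root

eve_clean:
  cron.present:
    - name: /usr/sbin/so-suricata-eve-clean > /dev/null 2>&1
    - identifier: clean_suricata_eve_files
    - user: root
    - minute: '*/5'
```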

View File

@@ -339,6 +339,18 @@ base:
- docker_clean
- idh
'*_fleet and G@saltversion:{{saltversion}}':
- match: compound
- ssl
- sensoroni
- telegraf
- firewall
- logstash
- elasticfleet
- elasticfleet.install_agent_grid
- schedule
- docker_clean
'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:Rocky )':
- match: compound
- workstation
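
The new `*_fleet` target, like the workstation entry after it, is a compound match: the minion ID glob must hit and the `saltversion` grain must equal the rendered `{{saltversion}}`, which keeps highstate from applying to minions that have not finished upgrading Salt. A stripped-down sketch of the two matcher styles used here (targets and states illustrative):

```
'*_example and G@saltversion:{{saltversion}}':   # ID glob AND grain match
  - match: compound
  - some.state

'J@feature:enabled:^[Tt][Rr][Uu][Ee]$':          # pillar PCRE match
  - match: compound
  - other.state
```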

View File

@@ -0,0 +1 @@
{% set ROLE_GLOBALS = {} %}
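
This new one-line map file gives every role a safe import target: templates can import `ROLE_GLOBALS` unconditionally, and on non-sensor nodes they simply get an empty dict instead of a render failure. A hedged sketch of defensive use on the importing side (key names and fallback value are illustrative):

```
{% from 'vars/sensor.map.jinja' import ROLE_GLOBALS %}
{# On sensors the map is expected to carry sensor.interface; elsewhere
   fall back rather than raise an UndefinedError #}
{% set iface = ROLE_GLOBALS.get('sensor', {}).get('interface', 'bond0') %}
```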

View File

@@ -1,8 +1,17 @@
{% from 'vars/sensor.map.jinja' import ROLE_GLOBALS %}
{% import_yaml 'zeek/defaults.yaml' as zeek_defaults with context %}
{% set zeek_pillar = salt['pillar.get']('zeek', []) %}
{% set ZEEKMERGED = salt['defaults.merge'](zeek_defaults, zeek_pillar, in_place=False) %}
{% do ZEEKMERGED.zeek.config.node.update({'interface': ROLE_GLOBALS.sensor.interface}) %}
{% set ZEEKMERGED = salt['pillar.get']('zeek', zeek_defaults.zeek, merge=True) %}
{% do ZEEKMERGED.config.node.update({'interface': ROLE_GLOBALS.sensor.interface}) %}
{% if ZEEKMERGED.config.local.load is defined %}
{% set LOCALLOAD = ZEEKMERGED.config.local.pop('load') %}
{% do ZEEKMERGED.config.local.update({'@load': LOCALLOAD}) %}
{% endif %}
{% if ZEEKMERGED.config.local['load-sigs'] is defined %}
{% set LOCALLOADSIGS = ZEEKMERGED.config.local.pop('load-sigs') %}
{% do ZEEKMERGED.config.local.update({'@load-sigs': LOCALLOADSIGS}) %}
{% endif %}
{% set ZEEKOPTIONS = {} %}
{% set ENABLED = salt['pillar.get']('zeek:enabled', True) %}
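
The merge switches from `defaults.merge` to `pillar.get` with `merge=True`, which recursively layers pillar values over the shipped defaults and returns the dict without the top-level `zeek:` wrapper, hence the `.zeek`-less lookups in `init.sls` below. Note that with `pillar.get`, pillar lists replace the default lists outright (`merge_lists` defaults to False). The two `pop`/`update` blocks then restore Zeek's literal `@load`/`@load-sigs` directive names that pillar stores as plain `load`/`load-sigs`. Roughly (values illustrative):

```
{# defaults ship load: [tuning/defaults]; a pillar override of
   zeek:config:local:load: [misc/capture-loss] wins per key, and the
   result is re-keyed for the template: #}
{% set ZEEKMERGED = salt['pillar.get']('zeek', zeek_defaults.zeek, merge=True) %}
{% if ZEEKMERGED.config.local.load is defined %}
{% do ZEEKMERGED.config.local.update({'@load': ZEEKMERGED.config.local.pop('load')}) %}
{% endif %}
```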

View File

@@ -21,8 +21,9 @@ zeek:
SpoolDir: /nsm/zeek/spool
CfgDir: /opt/zeek/etc
CompressLogs: 1
ZeekPort: 27760
local:
'@load':
load:
- misc/loaded-scripts
- tuning/defaults
- misc/capture-loss
@@ -68,7 +69,7 @@ zeek:
- zeek-plugin-profinet
- zeek-spicy-wireguard
- zeek-spicy-stun
'@load-sigs':
load-sigs:
- frameworks/signatures/detect-windows-shells
redef:
- LogAscii::use_json = T;
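
In the defaults the directive keys lose their `@` prefix so they can round-trip cleanly through pillar and the SOC config UI; the map file above re-applies the `@` just before rendering. `ZeekPort: 27760` sits alongside the other zeekctl settings and appears to pin the base port zeekctl uses for cluster communication (hedged: mirrored from the hunk, not verified against the zeekctl docs). A local pillar override would now use the plain names:

```
zeek:
  config:
    local:
      load:
        - custom/my-policy        # extra policy, illustrative
      load-sigs:
        - custom/my-signatures    # extra signature set, illustrative
```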

View File

@@ -2,9 +2,9 @@
{%- set ALLOWEDOPTIONS = [ '@load', '@load-sigs', 'redef' ] %}
{%- for k, v in LOCAL.items() %}
{%- for k, v in LOCAL.items() | sort %}
{%- if k|lower in ALLOWEDOPTIONS %}
{%- for li in v|sort %}
{%- for li in v %}
{{ k }} {{ li }}
{%- endfor %}
{%- endif %}
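
Sorting moves from the item lists (`v|sort`) to the key iteration (`LOCAL.items() | sort`): sections now render in a deterministic order, but the entries inside `@load` and `@load-sigs` keep their authored order, which matters because Zeek honors script load order. A self-contained sketch of the effect, with inline sample data:

```
{% set LOCAL = {'redef': ['X = T;'], '@load': ['b/policy', 'a/policy']} %}
{%- for k, v in LOCAL.items() | sort %}
{%- for li in v %}
{{ k }} {{ li }}
{%- endfor %}
{%- endfor %}
{# renders "@load b/policy", "@load a/policy", then "redef X = T;";
   keys sorted, list order preserved #}
```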

View File

@@ -78,7 +78,7 @@ zeekpolicysync:
- group: 939
- template: jinja
- defaults:
FILE_EXTRACTION: {{ ZEEKMERGED.zeek.file_extraction }}
FILE_EXTRACTION: {{ ZEEKMERGED.file_extraction }}
# Ensure the zeek spool tree (and state.db) ownership is correct
zeekspoolownership:
@@ -109,7 +109,7 @@ zeekctlcfg:
- group: 939
- template: jinja
- defaults:
ZEEKCTL: {{ ZEEKMERGED.zeek.config.zeekctl | tojson }}
ZEEKCTL: {{ ZEEKMERGED.config.zeekctl | tojson }}
# Sync node.cfg
nodecfg:
@@ -120,7 +120,7 @@ nodecfg:
- group: 939
- template: jinja
- defaults:
NODE: {{ ZEEKMERGED.zeek.config.node }}
NODE: {{ ZEEKMERGED.config.node }}
networkscfg:
file.managed:
@@ -130,7 +130,7 @@ networkscfg:
- group: 939
- template: jinja
- defaults:
NETWORKS: {{ ZEEKMERGED.zeek.config.networks }}
NETWORKS: {{ ZEEKMERGED.config.networks }}
#zeekcleanscript:
# file.managed:
@@ -198,7 +198,7 @@ localzeek:
- group: 939
- template: jinja
- defaults:
LOCAL: {{ ZEEKMERGED.zeek.config.local | tojson }}
LOCAL: {{ ZEEKMERGED.config.local | tojson }}
so-zeek:
docker_container.{{ ZEEKOPTIONS.status }}:
@@ -208,6 +208,7 @@ so-zeek:
- privileged: True
- ulimits:
- core=0
- nofile=1048576:1048576
- binds:
- /nsm/zeek/logs:/nsm/zeek/logs:rw
- /nsm/zeek/spool:/nsm/zeek/spool:rw
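
Two things change in this file: every `ZEEKMERGED.zeek.*` lookup drops the `.zeek` segment to match the un-wrapped dict the map now returns, and the container gains an explicit open-file ulimit. The container stanza, trimmed to the relevant arguments (the state function is shown as `running` here; the real one is selected via `ZEEKOPTIONS.status`):

```
so-zeek:
  docker_container.running:
    - privileged: True
    - ulimits:
      - core=0                     # no core dumps
      - nofile=1048576:1048576     # soft:hard open-file cap for busy sensors
    - binds:
      - /nsm/zeek/logs:/nsm/zeek/logs:rw
```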

View File

@@ -5,11 +5,18 @@ zeek:
helpLink: zeek.html
config:
local:
'@load':
load:
description: List of Zeek policies to load
forcedType: "[]string"
helpLink: zeek.html
'@load-sigs':
load-sigs:
description: List of Zeek signatures to load
forcedType: "[]string"
helpLink: zeek.html
redef:
description: List of Zeek variables to redefine
forcedType: "[]string"
advanced: True
helpLink: zeek.html
node:
lb_procs:
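
These annotation entries drive how SOC renders the options: `description` supplies the UI help text, `forcedType: "[]string"` makes SOC edit the value as a list of strings, `advanced: True` tucks the option behind the advanced toggle, and `helpLink` ties it to the docs page. A sketch of one complete entry (option name hypothetical):

```
zeek:
  config:
    local:
      my_option:
        description: Example list option surfaced in SOC
        forcedType: "[]string"
        advanced: True
        helpLink: zeek.html
```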

Some files were not shown because too many files have changed in this diff.