Merge pull request #10 from Security-Onion-Solutions/dev

Dev Sync
This commit is contained in:
Masaya-A
2021-05-19 08:59:49 +09:00
committed by GitHub
126 changed files with 9987 additions and 1030 deletions

1
HOTFIX Normal file
View File

@@ -0,0 +1 @@
GRIDFIX

View File

@@ -1,6 +1,6 @@
## Security Onion 2.3.40
## Security Onion 2.3.50
Security Onion 2.3.40 is here!
Security Onion 2.3.50 is here!
## Screenshots

View File

@@ -1,16 +1,17 @@
### 2.3.40 ISO image built on 2021/03/22
### 2.3.50 ISO image built on 2021/04/27
### Download and Verify
2.3.40 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.40.iso
2.3.50 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
MD5: FB72C0675F262A714B287BB33CE82504
SHA1: E8F5A9AA23990DF794611F9A178D88414F5DA81C
SHA256: DB125D6E770F75C3FD35ABE3F8A8B21454B7A7618C2B446D11B6AC8574601070
MD5: C39CEA68B5A8AFC5CFFB2481797C0374
SHA1: 00AD9F29ABE3AB495136989E62EBB8FA00DA82C6
SHA256: D77AE370D7863837A989F6735413D1DD46B866D8D135A4C363B0633E3990387E
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.40.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -24,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.40.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.40.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.40.iso.sig securityonion-2.3.40.iso
gpg --verify securityonion-2.3.50.iso.sig securityonion-2.3.50.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Mon 22 Mar 2021 09:35:50 AM EDT using RSA key ID FE507013
gpg: Signature made Tue 27 Apr 2021 02:17:25 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.

View File

@@ -1 +1 @@
2.3.50
2.3.60

View File

@@ -1,71 +0,0 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set MANAGER = salt['grains.get']('master') %}
# Replace the system yum configuration with the airgap-specific one.
airgapyum:
file.managed:
- name: /etc/yum/yum.conf
- source: salt://airgap/files/yum.conf
# Point yum at the package repo served by the grid manager.
# NOTE(review): sslverify: 0 disables TLS verification for the manager repo;
# package integrity then relies solely on gpgcheck: 1 — confirm this is intended.
airgap_repo:
pkgrepo.managed:
- humanname: Airgap Repo
- baseurl: https://{{ MANAGER }}/repo
- gpgcheck: 1
- sslverify: 0
# Remove every internet-facing repo file so yum cannot attempt outbound access.
agbase:
file.absent:
- name: /etc/yum.repos.d/CentOS-Base.repo
agcr:
file.absent:
- name: /etc/yum.repos.d/CentOS-CR.repo
agdebug:
file.absent:
- name: /etc/yum.repos.d/CentOS-Debuginfo.repo
agfasttrack:
file.absent:
- name: /etc/yum.repos.d/CentOS-fasttrack.repo
agmedia:
file.absent:
- name: /etc/yum.repos.d/CentOS-Media.repo
agsources:
file.absent:
- name: /etc/yum.repos.d/CentOS-Sources.repo
agvault:
file.absent:
- name: /etc/yum.repos.d/CentOS-Vault.repo
agkernel:
file.absent:
- name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo
agepel:
file.absent:
- name: /etc/yum.repos.d/epel.repo
agtesting:
file.absent:
- name: /etc/yum.repos.d/epel-testing.repo
agssrepo:
file.absent:
- name: /etc/yum.repos.d/saltstack.repo
agwazrepo:
file.absent:
- name: /etc/yum.repos.d/wazuh.repo
{% else %}
# Fail visibly when this state is applied to a host role it is not allowed on.
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -43,8 +43,9 @@ pki_private_key:
- require:
- file: /etc/pki
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
x509_pem_entries:
module.run:

View File

@@ -1 +1 @@
net.ipv4.ip_local_reserved_ports=55000,57314
net.ipv4.ip_local_reserved_ports=55000,57314,47760-47860

View File

@@ -0,0 +1,2 @@
{%- set VERSION = salt['pillar.get']('global:soversion') -%}
{{ VERSION }}

6
salt/common/files/vimrc Normal file
View File

@@ -0,0 +1,6 @@
" Activates filetype detection
filetype plugin indent on
" Sets .sls files to use YAML syntax highlighting
autocmd BufNewFile,BufRead *.sls set syntax=yaml
set number

View File

@@ -49,6 +49,11 @@ sosaltstackperms:
- gid: 939
- dir_mode: 770
so_log_perms:
file.directory:
- name: /opt/so/log
- dir_mode: 755
# Create a state directory
statedir:
file.directory:
@@ -64,15 +69,12 @@ salttmp:
- group: 939
- makedirs: True
# Install epel
{% if grains['os'] == 'CentOS' %}
repair_yumdb:
cmd.run:
- name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
- onlyif:
- 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
{% endif %}
# VIM config
vimconfig:
file.managed:
- name: /root/.vimrc
- source: salt://common/files/vimrc
- replace: False
# Install common packages
{% if grains['os'] != 'CentOS' %}
@@ -100,6 +102,8 @@ commonpkgs:
- python3-mysqldb
- python3-packaging
- git
- vim
heldpackages:
pkg.installed:
- pkgs:
@@ -138,6 +142,7 @@ commonpkgs:
- lvm2
- openssl
- git
- vim-enhanced
heldpackages:
pkg.installed:
@@ -230,6 +235,30 @@ commonlogrotateconf:
- month: '*'
- dayweek: '*'
# Create the status directory
sostatusdir:
file.directory:
- name: /opt/so/log/sostatus
- user: 0
- group: 0
- makedirs: True
sostatus_log:
file.managed:
- name: /opt/so/log/sostatus/status.log
- mode: 644
# Install sostatus check cron
'/usr/sbin/so-status -q; echo $? > /opt/so/log/sostatus/status.log 2>&1':
cron.present:
- user: root
- minute: '*/1'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
# Lock permissions on the backup directory
backupdir:
@@ -249,6 +278,14 @@ backupdir:
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% else %}
soversionfile:
file.managed:
- name: /etc/soversion
- source: salt://common/files/soversion
- mode: 644
- template: jinja
{% endif %}
# Manager daemon.json
@@ -266,9 +303,10 @@ docker:
- file: docker_daemon
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
# 55000 = Wazuh, 57314 = Strelka, 47760-47860 = Zeek
dockerapplyports:
cmd.run:
- name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314"; fi
- name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314,47760-47860"; fi
# Reserve OS ports for Docker proxy
dockerreserveports:

View File

@@ -0,0 +1,64 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# so-airgap-hotfixapply: apply a hotfix tarball (built by so-airgap-hotfix)
# to an airgapped manager.
#
# Usage: so-airgap-hotfixapply /path/to/sohotfix.tar
#
# Exit status: 0 on success or when already up to date; 1 on bad arguments
# or when the hotfix does not match the installed version.

# so-common supplies copy_new_files (used below).
. /usr/sbin/so-common

UPDATE_DIR=/tmp/sohotfixapply

if [ -z "$1" ]; then
  echo "No tarball given. Please provide the filename so I can run the hotfix"
  echo "so-airgap-hotfixapply /path/to/sohotfix.tar"
  exit 1
elif [ ! -f "$1" ]; then
  echo "Unable to find $1. Make sure your path is correct and retry."
  exit 1
else
  echo "Determining if we need to apply this hotfix"
  rm -rf "$UPDATE_DIR"
  mkdir -p "$UPDATE_DIR"
  # Quote the tarball path so paths containing spaces work.
  tar xvf "$1" -C "$UPDATE_DIR"
  # Compare some versions
  NEWVERSION=$(cat "$UPDATE_DIR/VERSION")
  HOTFIXVERSION=$(cat "$UPDATE_DIR/HOTFIX")
  # /etc/sohotfix may not exist on a box that never had a hotfix applied;
  # suppress the error the same way soup's upgrade_check does.
  CURRENTHOTFIX=$(cat /etc/sohotfix 2>/dev/null)
  INSTALLEDVERSION=$(cat /etc/soversion)
  if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
    echo "Checking to see if there are hotfixes needed"
    if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
      # Already current is not a failure, so exit 0 (consistent with soup).
      echo "You are already running the latest version of Security Onion."
      rm -rf "$UPDATE_DIR"
      exit 0
    else
      echo "We need to apply a hotfix"
      copy_new_files
      echo "$HOTFIXVERSION" > /etc/sohotfix
      salt-call state.highstate -l info queue=True
      echo "The Hotfix $HOTFIXVERSION has been applied"
      # Clean up
      rm -rf "$UPDATE_DIR"
      exit 0
    fi
  else
    echo "This hotfix is not compatible with your current version. Download the latest ISO and run soup"
    rm -rf "$UPDATE_DIR"
    # Previously this branch fell through and exited 0; signal the failure.
    exit 1
  fi
fi

View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# so-airgap-hotfix: build a hotfix tarball from the latest Security Onion
# code on GitHub, for transfer to an airgapped manager (applied there with
# so-airgap-hotfixapply).

# Get the latest code
rm -rf /tmp/sohotfix
mkdir -p /tmp/sohotfix
# Guard the cd so a failure cannot leave us cloning into the wrong directory.
cd /tmp/sohotfix || exit 1
git clone https://github.com/Security-Onion-Solutions/securityonion
if [ ! -d "/tmp/sohotfix/securityonion" ]; then
  echo "I was unable to get the latest code. Check your internet and try again."
  exit 1
else
  echo "Looks like we have the code, let's create the tarball."
  cd /tmp/sohotfix/securityonion || exit 1
  # The tarball carries the HOTFIX/VERSION markers plus the salt and pillar trees.
  tar cvf /tmp/sohotfix/sohotfix.tar HOTFIX VERSION salt pillar
  echo ""
  echo "Copy /tmp/sohotfix/sohotfix.tar to portable media and then copy it to your airgap manager."
  exit 0
fi

View File

@@ -15,6 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_SALT_DIR=/opt/so/saltstack/default
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
@@ -122,6 +124,20 @@ check_elastic_license() {
fi
}
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/
rsync -a pillar $DEFAULT_SALT_DIR/
chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp
}
disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
elastic_license() {
read -r -d '' message <<- EOM
@@ -165,9 +181,9 @@ get_random_value() {
gpg_rpm_import() {
if [[ "$OS" == "centos" ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
local RPMKEYSLOC="../salt/common/keys"
local RPMKEYSLOC="../salt/repo/client/files/centos/keys"
else
local RPMKEYSLOC="$UPDATEDIR/salt/common/keys"
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/centos/keys"
fi
RPMKEYS=('RPM-GPG-KEY-EPEL-7' 'GPG-KEY-WAZUH' 'docker.pub' 'SALTSTACK-GPG-KEY.pub' 'securityonion.pub')

View File

@@ -35,6 +35,7 @@ if [ ! -f $BACKUPFILE ]; then
{%- endfor %}
tar -rf $BACKUPFILE /etc/pki
tar -rf $BACKUPFILE /etc/salt
tar -rf $BACKUPFILE /opt/so/conf/kratos
fi

View File

@@ -60,15 +60,19 @@ def main(quiet):
no_prunable = True
for t_list in grouped_tag_lists:
try:
# Keep the 2 most current images
# Group tags by version, in case multiple images exist with the same version string
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
if len(t_list) <= 2:
grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
# Keep the 2 most current version groups
if len(grouped_t_list) <= 2:
continue
else:
no_prunable = False
for tag in t_list[2:]:
if not quiet: print(f'Removing image {tag}')
client.images.remove(tag)
for group in grouped_t_list[2:]:
for tag in group:
if not quiet: print(f'Removing image {tag}')
client.images.remove(tag)
except InvalidVersion as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e.args[0]}', file=sys.stderr)
exit(1)

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# so-influxdb-clean: interactively purge InfluxDB (telegraf database) data
# older than a user-supplied retention duration.

# Accepted duration formats: "<N>w" (weeks) or "<N>d" (days), no space.
wdurregex="^[0-9]+w$"
ddurregex="^[0-9]+d$"

echo -e "\nThis script is used to reduce the size of InfluxDB by removing old data and retaining only the duration specified."
echo "The duration will need to be specified as an integer followed by the duration unit without a space."
echo -e "\nFor example, to purge all data but retain the past 12 weeks, specify 12w for the duration."
echo "The duration units are as follows:"
echo " w - week(s)"
echo " d - day(s)"

# Prompt until the (case-folded) input matches one of the duration formats.
while true; do
  echo ""
  read -p 'Enter the duration of past data that you would like to retain: ' duration
  duration=$(echo "$duration" | tr '[:upper:]' '[:lower:]')
  if [[ "$duration" =~ $wdurregex ]] || [[ "$duration" =~ $ddurregex ]]; then
    break
  fi
  echo -e "\nInvalid duration."
done

echo -e "\nInfluxDB will now be cleaned and leave only the past $duration worth of data."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
  # Message fix: "may could take" -> "may take".
  echo -e "\nCleaning InfluxDb and saving only the past $duration. This may take several minutes depending on how much data needs to be cleaned."
  # DELETE everything from 2020-01-01 up to now() minus the retained duration.
  if docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE \"time\" >= '2020-01-01T00:00:00.0000000Z' AND \"time\" <= now() - $duration\""; then
    echo -e "\nInfluxDb clean complete."
  else
    echo -e "\nSomething went wrong with cleaning InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
  fi
else
  echo -e "\nExiting as requested."
fi

View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# so-influxdb-migrate: downsample InfluxDB "autogen" data into the
# so_long_term retention policy, one day at a time, starting 2020-07-21.
echo -e "\nThis script is used to reduce the size of InfluxDB by downsampling old data into the so_long_term retention policy."
echo -e "\nInfluxDB will now be migrated. This could take a few hours depending on how large the database is and hardware resources available."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nMigrating InfluxDb started at `date`. This may take several hours depending on how much data needs to be moved."
day=0
startdate=`date`
# Each pass downsamples one day ([start + day, start + day + 1)) of autogen
# data into so_long_term via SELECT mean(*) INTO, grouped in 5m buckets.
# The loop exits when the influx command fails or the break below fires.
while docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"SELECT mean(*) INTO \"so_long_term\".:MEASUREMENT FROM \"autogen\"./.*/ WHERE \"time\" >= '2020-07-21T00:00:00.0000000Z' + ${day}d AND \"time\" <= '2020-07-21T00:00:00.0000000Z' + $((day+1))d GROUP BY time(5m),*\""; do
# NOTE(review): 2020-07-21 is the hard-coded start of the migration window --
# presumably the earliest date telegraf data can exist from; confirm upstream.
migrationdate=`date -d "2020-07-21 + ${day} days" +"%y-%m-%d"`
echo "Migration of $migrationdate started at $startdate and completed at `date`."
# Stop once the next day to migrate would be today or later (epoch compare).
newdaytomigrate=$(date -d "$migrationdate + 1 days" +"%s")
today=$(date +"%s")
if [ $newdaytomigrate -ge $today ]; then
break
else
((day=day+1))
startdate=`date`
echo -e "\nMigrating the next day's worth of data."
fi
done
echo -e "\nInfluxDb data migration complete."
else
echo -e "\nExiting as requested."
fi

View File

@@ -22,5 +22,5 @@ salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
/usr/sbin/so-soctopus-restart
echo "Importing Plays - this will take some time...."
wait 5
/usr/sbin/so-playbook-ruleupdate
sleep 5
/usr/sbin/so-playbook-ruleupdate

View File

@@ -19,6 +19,6 @@
# Check to see if we are already running
IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l)
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
docker exec so-soctopus python3 playbook_play-sync.py

View File

@@ -17,18 +17,6 @@
. /usr/sbin/so-common
#check_boss_raid() {
# BOSSBIN=/opt/boss/mvcli
# BOSSRC=$($BOSSBIN info -o vd | grep functional)
#
# if [[ $BOSSRC ]]; then
# # Raid is good
# BOSSRAID=0
# else
# BOSSRAID=1
# fi
#}
check_lsi_raid() {
# For use for LSI on Ubuntu
#MEGA=/opt/MegaRAID/MegeCli/MegaCli64
@@ -66,11 +54,11 @@ mkdir -p /opt/so/log/raid
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
#check_boss_raid
check_software_raid
echo "osraid=$BOSSRAID nsmraid=$SWRAID" > /opt/so/log/raid/status.log
echo "nsmraid=$SWRAID" > /opt/so/log/raid/status.log
{%- elif grains['sosmodel'] in ['SOS1000F', 'SOS1000', 'SOSSN7200', 'SOS10K', 'SOS4000'] %}
#check_boss_raid
check_lsi_raid
echo "osraid=$BOSSRAID nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
echo "nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
{%- else %}
exit 0
{%- endif %}

View File

@@ -116,7 +116,7 @@ clean() {
# Check to see if we are already running
IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l)
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; do

View File

@@ -4,90 +4,184 @@
if [[ $1 =~ ^(-q|--quiet) ]]; then
quiet=true
elif [[ $1 =~ ^(-v|--verbose) ]]; then
verbose=true
fi
sshd_config=/etc/ssh/sshd_config
temp_config=/tmp/sshd_config
before=
after=
reload_required=false
change_header_printed=false
print_sshd_t() {
check_sshd_t() {
local string=$1
local state=$2
echo "${state}:"
local grep_out
grep_out=$(sshd -T | grep "^${string}")
if [[ $state == "Before" ]]; then
before=$grep_out
before=$grep_out
}
print_diff() {
local diff
diff=$(diff -dbB <(echo $before) <(echo $after) | awk 'NR>1')
if [[ -n $diff ]]; then
if [[ $change_header_printed == false ]]; then
printf '%s\n' '' "Changes" '-------' ''
change_header_printed=true
fi
echo -e "$diff\n"
fi
}
replace_or_add() {
local type=$1
local string=$2
if grep -q "$type" $temp_config; then
sed -i "/$type .*/d" $temp_config
fi
printf "%s\n\n" "$string" >> $temp_config
reload_required=true
}
test_config() {
local msg
msg=$(sshd -t -f $temp_config)
local ret=$?
if [[ -n $msg ]]; then
echo "Error found in temp sshd config:"
echo $msg
fi
return $ret
}
main() {
if ! [[ $quiet ]]; then echo "Copying current config to $temp_config"; fi
cp $sshd_config $temp_config
# Add newline to ssh for legibility
echo "" >> $temp_config
# Ciphers
check_sshd_t "ciphers"
local bad_ciphers=(
"3des-cbc"
"aes128-cbc"
"aes192-cbc"
"aes256-cbc"
"arcfour"
"arcfour128"
"arcfour256"
"blowfish-cbc"
"cast128-cbc"
)
local cipher_string=$before
for cipher in "${bad_ciphers[@]}"; do
cipher_string=$(echo "$cipher_string" | sed "s/${cipher}\(,\|\$\)//g" | sed 's/,$//')
done
after=$cipher_string
if [[ $verbose ]]; then print_diff; fi
if [[ $before != "$after" ]]; then
replace_or_add "ciphers" "$cipher_string" && test_config || exit 1
fi
# KexAlgorithms
check_sshd_t "kexalgorithms"
local bad_kexalgs=(
"diffie-hellman-group-exchange-sha1"
"diffie-hellman-group-exchange-sha256"
"diffie-hellman-group1-sha1"
"diffie-hellman-group14-sha1"
"ecdh-sha2-nistp256"
"ecdh-sha2-nistp521"
"ecdh-sha2-nistp384"
)
local kexalg_string=$before
for kexalg in "${bad_kexalgs[@]}"; do
kexalg_string=$(echo "$kexalg_string" | sed "s/${kexalg}\(,\|\$\)//g" | sed 's/,$//')
done
after=$kexalg_string
if [[ $verbose ]]; then print_diff; fi
if [[ $before != "$after" ]]; then
replace_or_add "kexalgorithms" "$kexalg_string" && test_config || exit 1
fi
# Macs
check_sshd_t "macs"
local bad_macs=(
"hmac-sha2-512"
"umac-128@openssh.com"
"hmac-sha2-256"
"umac-64@openssh.com"
"hmac-sha1"
"hmac-sha1-etm@openssh.com"
"umac-64-etm@openssh.com"
)
local macs_string=$before
for mac in "${bad_macs[@]}"; do
macs_string=$(echo "$macs_string" | sed "s/${mac}\(,\|\$\)//g" | sed 's/,$//')
done
after=$macs_string
if [[ $verbose ]]; then print_diff; fi
if [[ $before != "$after" ]]; then
replace_or_add "macs" "$macs_string" && test_config || exit 1
fi
# HostKeyAlgorithms
check_sshd_t "hostkeyalgorithms"
local optional_suffix_regex_hka="\(-cert-v01@openssh.com\)\?"
local bad_hostkeyalg_list=(
"ecdsa-sha2-nistp256"
"ecdsa-sha2-nistp384"
"ecdsa-sha2-nistp521"
"ssh-rsa"
"ssh-dss"
)
local hostkeyalg_string=$before
for alg in "${bad_hostkeyalg_list[@]}"; do
hostkeyalg_string=$(echo "$hostkeyalg_string" | sed "s/${alg}${optional_suffix_regex_hka}\(,\|\$\)//g" | sed 's/,$//')
done
after=$hostkeyalg_string
if [[ $verbose ]]; then print_diff; fi
if [[ $before != "$after" ]]; then
replace_or_add "hostkeyalgorithms" "$hostkeyalg_string" && test_config || exit 1
fi
if [[ $reload_required == true ]]; then
mv -f $temp_config $sshd_config
if ! [[ $quiet ]]; then echo "Reloading sshd to load config changes"; fi
systemctl reload sshd
echo "[ WARNING ] Any new ssh sessions will need to remove and reaccept the host key fingerprint for this server before reconnecting."
else
after=$grep_out
fi
echo $grep_out
}
print_msg() {
local msg=$1
if ! [[ $quiet ]]; then
printf "%s\n" \
"----" \
"$msg" \
"----" \
""
if ! [[ $quiet ]]; then echo "No changes made to temp file, cleaning up"; fi
rm -f $temp_config
fi
}
if ! [[ $quiet ]]; then print_sshd_t "ciphers" "Before"; fi
sshd -T | grep "^ciphers" | sed -e "s/\(3des-cbc\|aes128-cbc\|aes192-cbc\|aes256-cbc\|arcfour\|arcfour128\|arcfour256\|blowfish-cbc\|cast128-cbc\|rijndael-cbc@lysator.liu.se\)\,\?//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
print_sshd_t "ciphers" "After"
echo ""
fi
if [[ $before != $after ]]; then
reload_required=true
fi
if ! [[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi
sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
print_sshd_t "kexalgorithms" "After"
echo ""
fi
if [[ $before != $after ]]; then
reload_required=true
fi
if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi
sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
print_sshd_t "macs" "After"
echo ""
fi
if [[ $before != $after ]]; then
reload_required=true
fi
if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi
sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
print_sshd_t "hostkeyalgorithms" "After"
echo ""
fi
if [[ $before != $after ]]; then
reload_required=true
fi
if [[ $reload_required == true ]]; then
print_msg "Reloading sshd to load config changes..."
systemctl reload sshd
fi
{% if grains['os'] != 'CentOS' %}
print_msg "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
{% endif %}
main

View File

@@ -21,9 +21,9 @@ UPDATE_DIR=/tmp/sogh/securityonion
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
add_common() {
@@ -161,6 +161,34 @@ check_log_size_limit() {
fi
}
check_os_updates() {
# Check to see if there are OS updates
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
if [[ $OS == 'ubuntu' ]]; then
OSUPDATES=$(apt list --upgradeable | grep -v "^Listing..." | grep -v "^docker-ce" | grep -v "^wazuh-" | grep -v "^salt-" | wc -l)
else
OSUPDATES=$(yum -q list updates | wc -l)
fi
if [[ "$OSUPDATES" -gt 0 ]]; then
echo $NEEDUPDATES
echo ""
read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
if [[ "$confirm" == [cC] ]]; then
echo "Continuing without updating packages"
elif [[ "$confirm" == [uU] ]]; then
echo "Applying Grid Updates"
salt \* -b 5 state.apply patch.os queue=True
else
echo "Exiting soup"
exit 0
fi
else
echo "Looks like you have an updated OS"
fi
}
clean_dockers() {
# Place Holder for cleaning up old docker images
echo "Trying to clean up old dockers."
@@ -186,16 +214,6 @@ clone_to_tmp() {
fi
}
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/
rsync -a pillar $DEFAULT_SALT_DIR/
chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp
}
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
@@ -230,6 +248,13 @@ masterunlock() {
fi
}
preupgrade_changes_2.3.50_repo() {
# We made repo changes in 2.3.50 and this prepares for that on upgrade
echo "Checking to see if 2.3.50 repo changes are needed."
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50_repo
}
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."
@@ -239,6 +264,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
}
postupgrade_changes() {
@@ -248,6 +274,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_rc2
[[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
[[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
[[ "$POSTVERSION" == 2.3.50 ]] && post_2.3.5X_to_2.3.60
}
post_rc1_to_2.3.21() {
@@ -268,6 +295,10 @@ post_2.3.30_to_2.3.40() {
POSTVERSION=2.3.40
}
post_2.3.5X_to_2.3.60() {
POSTVERSION=2.3.60
}
rc1_to_rc2() {
@@ -409,6 +440,64 @@ up_2.3.2X_to_2.3.30() {
sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls;
fi
check_log_size_limit
INSTALLEDVERSION=2.3.30
}
up_2.3.3X_to_2.3.50_repo() {
echo "Performing 2.3.50 repo actions."
if [[ "$OS" == "centos" ]]; then
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
disable_fastestmirror
echo "Deleting unneeded repo files."
DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'wazuh')
for DELREPO in "${DELREPOS[@]}"; do
if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
echo "Deleting $DELREPO.repo"
rm -f "/etc/yum.repos.d/$DELREPO.repo"
fi
done
if [ $is_airgap -eq 1 ]; then
# Copy the new repo file if not airgap
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
yum clean all
yum repolist
fi
fi
}
up_2.3.3X_to_2.3.50() {
cat <<EOF > /tmp/supersed.txt
/so-zeek:/ {
p;
n;
/shards:/ {
p;
n;
/warm:/ {
p;
n;
/close:/ {
s/close: 365/close: 45/;
p;
n;
/delete:/ {
s/delete: 45/delete: 365/;
p;
d;
}
}
}
}
}
p;
EOF
sed -n -i -f /tmp/supersed.txt /opt/so/saltstack/local/pillar/global.sls
rm /tmp/supersed.txt
INSTALLEDVERSION=2.3.50
}
verify_upgradespace() {
@@ -478,16 +567,28 @@ update_version() {
# Update the version to the latest
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
echo $HOTFIXVERSION > /etc/sohotfix
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
}
upgrade_check() {
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
CURRENTHOTFIX=$(cat /etc/sohotfix 2>/dev/null)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
echo "Checking to see if there are hotfixes needed"
if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
else
echo "We need to apply a hotfix"
is_hotfix=true
fi
else
is_hotfix=false
fi
}
upgrade_check_salt() {
@@ -503,22 +604,18 @@ upgrade_salt() {
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
if [[ $OS == 'centos' ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
if [ $is_airgap -eq 0 ]; then
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
else
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
fi
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
elif [[ $OS == 'ubuntu' ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
@@ -598,7 +695,7 @@ else
rm -rf $UPDATE_DIR
clone_to_tmp
fi
check_os_updates
echo ""
echo "Verifying we have the latest soup script."
verify_latest_update_script
@@ -619,143 +716,169 @@ upgrade_space
echo "Checking for Salt Master and Minion updates."
upgrade_check_salt
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
echo "Updating dockers to $NEWVERSION."
if [ $is_airgap -eq 0 ]; then
airgap_update_dockers
else
update_registry
update_docker_containers "soup"
fi
echo ""
echo "Stopping Salt Minion service."
systemctl stop salt-minion
echo "Killing any remaining Salt Minion processes."
pkill -9 -ef /usr/bin/salt-minion
echo ""
echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
# Does salt need upgraded. If so update it.
if [ "$UPGRADESALT" == "1" ]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION"
copy_new_files
echo ""
update_version
salt-call state.highstate -l info queue=True
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
echo "Updating dockers to $NEWVERSION."
if [ $is_airgap -eq 0 ]; then
airgap_update_dockers
update_centos_repo
yum clean all
check_os_updates
else
update_registry
update_docker_containers "soup"
fi
upgrade_salt
fi
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
if [[ $(salt --versions-report | grep Salt: | awk {'print $2'}) != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
else
echo "Salt upgrade success."
echo "Stopping Salt Minion service."
systemctl stop salt-minion
echo "Killing any remaining Salt Minion processes."
pkill -9 -ef /usr/bin/salt-minion
echo ""
fi
preupgrade_changes
echo ""
if [ $is_airgap -eq 0 ]; then
echo "Updating Rule Files to the Latest."
update_airgap_rules
fi
# Only update the repo if its airgap
if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then
update_centos_repo
fi
echo ""
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
update_version
echo ""
echo "Locking down Salt Master for upgrade"
masterlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
# Only regenerate osquery packages if Fleet is enabled
FLEET_MANAGER=$(lookup_pillar fleet_manager)
FLEET_NODE=$(lookup_pillar fleet_node)
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
echo "Regenerating Osquery Packages.... This will take several minutes."
salt-call state.apply fleet.event_gen-packages -l info queue=True
preupgrade_changes_2.3.50_repo
# Does salt need upgraded. If so update it.
if [ "$UPGRADESALT" == "1" ]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
fi
echo "Checking if Salt was upgraded."
echo ""
fi
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk {'print $2'})
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
else
echo "Salt upgrade success."
echo ""
fi
echo ""
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Stopping Salt Master to remove ACL"
systemctl stop salt-master
masterunlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
echo "Running a highstate. This could take several minutes."
salt-call state.highstate -l info queue=True
postupgrade_changes
unmount_update
thehive_maint
if [ "$UPGRADESALT" == "1" ]; then
preupgrade_changes
echo ""
echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
if [ $is_airgap -eq 0 ]; then
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all"
echo "Updating Rule Files to the Latest."
update_airgap_rules
fi
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True
# Only update the repo if its airgap
if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then
update_centos_repo
fi
echo ""
fi
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
update_version
check_sudoers
echo ""
echo "Locking down Salt Master for upgrade"
masterlock
if [[ -n $lsl_msg ]]; then
case $lsl_msg in
'distributed')
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
echo " -> We recommend checking and adjusting the values as necessary."
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
;;
'single-node')
# We can assume the lsl_details array has been set if lsl_msg has this value
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
echo " -> We recommend checking and adjusting the value as necessary."
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
;;
esac
fi
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
# Testing that that salt-master is up by checking that is it connected to itself
retry 50 10 "salt-call state.show_top -l error" || exit 1
if [ $NUM_MINIONS -gt 1 ]; then
echo ""
echo "Ensuring python modules for Salt are installed and patched."
salt-call state.apply salt.python3-influxdb -l info queue=True
echo ""
cat << EOF
# Only regenerate osquery packages if Fleet is enabled
FLEET_MANAGER=$(lookup_pillar fleet_manager)
FLEET_NODE=$(lookup_pillar fleet_node)
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
echo ""
echo "Regenerating Osquery Packages.... This will take several minutes."
salt-call state.apply fleet.event_gen-packages -l info queue=True
echo ""
fi
echo ""
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Stopping Salt Master to remove ACL"
systemctl stop salt-master
masterunlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
# Testing that that salt-master is up by checking that is it connected to itself
retry 50 10 "salt-call state.show_top -l error" || exit 1
echo "Running a highstate. This could take several minutes."
salt-call state.highstate -l info queue=True
postupgrade_changes
unmount_update
thehive_maint
if [ "$UPGRADESALT" == "1" ]; then
if [ $is_airgap -eq 0 ]; then
echo ""
echo "Cleaning repos on remote Security Onion nodes."
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
echo ""
fi
fi
check_sudoers
if [[ -n $lsl_msg ]]; then
case $lsl_msg in
'distributed')
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
echo " -> We recommend checking and adjusting the values as necessary."
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
;;
'single-node')
# We can assume the lsl_details array has been set if lsl_msg has this value
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
echo " -> We recommend checking and adjusting the value as necessary."
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
;;
esac
fi
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
if [ $NUM_MINIONS -gt 1 ]; then
cat << EOF
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
@@ -763,9 +886,12 @@ Each minion is on a random 15 minute check-in period and things like network ban
If it looks like youre missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
For more information, please see https://docs.securityonion.net/en/2.3/soup.html#distributed-deployments.
EOF
fi
fi
echo "### soup has been served at `date` ###"
}
@@ -777,8 +903,6 @@ Please review the following for more information about the update process and re
https://docs.securityonion.net/soup
https://blog.securityonion.net
Please note that soup only updates Security Onion components and does NOT update the underlying operating system (OS). When you installed Security Onion, there was an option to automatically update the OS packages. If you did not enable this option, then you will want to ensure that the OS is fully updated before running soup.
Press Enter to continue or Ctrl-C to cancel.
EOF

View File

@@ -34,7 +34,7 @@ overlimit() {
closedindices() {
INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
[ $? -eq 1 ] && return false
echo ${INDICES} | grep -q -E "(logstash-|so-)"
}
@@ -49,12 +49,12 @@ while overlimit && closedindices; do
# First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
done
done

View File

@@ -51,7 +51,7 @@
},
{ "set": { "field": "_index", "value": "so-firewall", "override": true } },
{ "set": { "if": "ctx.network?.transport_id == '0'", "field": "network.transport", "value": "icmp", "override": true } },
{"community_id": { "if": "ctx.network?.transport != null", "field":["source.ip","source.port","destination.ip","destination.port","network.transport"],"target_field":"network.community_id"}},
{"community_id": {} },
{ "set": { "field": "module", "value": "pfsense", "override": true } },
{ "set": { "field": "dataset", "value": "firewall", "override": true } },
{ "remove": { "field": ["real_message", "ip_sub_msg", "firewall.sub_message"], "ignore_failure": true } }

View File

@@ -9,6 +9,7 @@
{ "rename": { "if": "!(ctx.error?.eventdata_parsing == true)", "field": "unparsed.EventData", "target_field": "winlog.event_data", "ignore_missing": true, "ignore_failure": true } },
{ "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
{ "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
{ "rename": { "field": "winlog.datetime", "target_field": "winlog.systemTime", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational' && ctx.containsKey('winlog')", "name":"win.eventlogs" } },
{ "set": { "field": "event.module", "value": "osquery", "override": false } },

View File

@@ -1,7 +1,6 @@
{
"description" : "sysmon",
"processors" : [
{"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}},
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
{ "set": { "field": "event.module", "value": "sysmon", "override": true } },
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
@@ -64,6 +63,7 @@
{ "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } }
{ "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "community_id": {} }
]
}

View File

@@ -4,8 +4,8 @@
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } },
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
{ "rename": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "target_field": "ingest.timestamp", "ignore_missing": true } },
{ "set": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "value": "{{winlog.systemTime}}", "override": true } },
{ "rename": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "target_field": "event.ingested", "ignore_missing": true } },
{ "date": { "if": "ctx.winlog?.systemTime != null", "field": "winlog.systemTime", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS'Z'","yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"] } },
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
{ "set": { "field": "event.category", "value": "host", "override": true } },
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_failure": true, "ignore_missing": true } },

View File

@@ -8,11 +8,11 @@
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
{"community_id": {"if": "ctx.network?.transport != null", "field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}},
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
{ "community_id": {} },
{ "set": { "if": "ctx.source?.ip != null", "field": "client.ip", "value": "{{source.ip}}" } },
{ "set": { "if": "ctx.source?.port != null", "field": "client.port", "value": "{{source.port}}" } },
{ "set": { "if": "ctx.destination?.ip != null", "field": "server.ip", "value": "{{destination.ip}}" } },

View File

@@ -228,7 +228,11 @@
"event":{
"type":"object",
"dynamic": true
},
},
"event_data":{
"type":"object",
"dynamic": true
},
"file":{
"type":"object",
"dynamic": true
@@ -316,7 +320,8 @@
"type":"text",
"fields":{
"keyword":{
"type":"keyword"
"type":"keyword",
"ignore_above": 32766
}
}
},

View File

@@ -493,12 +493,13 @@ setup.template.enabled: false
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
http.enabled: true
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost
http.host: 0.0.0.0
# Port on which the HTTP endpoint will bind. Default is 5066.
http.port: 5066
queue.mem.events: {{ FBMEMEVENTS }}
queue.mem.flush.min_events: {{ FBMEMFLUSHMINEVENTS }}

View File

@@ -74,6 +74,7 @@ so-filebeat:
- port_bindings:
- 0.0.0.0:514:514/udp
- 0.0.0.0:514:514/tcp
- 0.0.0.0:5066:5066/tcp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml

View File

@@ -18,6 +18,9 @@ firewall:
beats_5644:
tcp:
- 5644
beats_5066:
tcp:
- 5066
cortex:
tcp:
- 9001

View File

@@ -352,7 +352,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -2176,7 +2176,7 @@
],
"measurement": "docker_container_mem",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [

View File

@@ -1647,7 +1647,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -4322,139 +4322,6 @@
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 6,
"w": 8,
"x": 16,
"y": 31
},
"hiddenSeries": false,
"id": 76,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "EPS",
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "esteps",
"orderByTime": "ASC",
"policy": "default",
"queryType": "randomWalk",
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"eps"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": [
{
"key": "host",
"operator": "=",
"value": "{{ SERVERNAME }}"
}
]
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "{{ SERVERNAME }} - Estimated EPS",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": "EPS",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": false,

View File

@@ -1631,7 +1631,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -5157,7 +5157,7 @@
"type": "fill"
}
],
"measurement": "esteps",
"measurement": "consumptioneps",
"orderByTime": "ASC",
"policy": "default",
"queryType": "randomWalk",

View File

@@ -351,7 +351,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -2866,7 +2866,7 @@
],
"measurement": "healthcheck",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [

View File

@@ -4486,7 +4486,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -5107,7 +5107,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -5562,7 +5562,7 @@
"type": "fill"
}
],
"measurement": "esteps",
"measurement": "consumptioneps",
"orderByTime": "ASC",
"policy": "default",
"queryType": "randomWalk",

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,13 @@
influxdb:
retention_policies:
so_short_term:
default: True
duration: 30d
shard_duration: 1d
so_long_term:
default: False
duration: 0d
shard_duration: 7d
downsample:
so_long_term:
resolution: 5m

View File

@@ -233,7 +233,7 @@
# enabled = true
# Determines whether the Flux query endpoint is enabled.
# flux-enabled = false
flux-enabled = true
# The bind address used by the HTTP service.
# bind-address = ":8086"

View File

@@ -2,11 +2,21 @@
{% if sls in allowed_states %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% import_yaml 'influxdb/defaults.yaml' as default_settings %}
{% set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %}
{% from 'salt/map.jinja' import PYTHON3INFLUX with context %}
{% from 'salt/map.jinja' import PYTHONINFLUXVERSION with context %}
{% set PYTHONINFLUXVERSIONINSTALLED = salt['cmd.run']("python3 -c \"exec('try:import influxdb; print (influxdb.__version__)\\nexcept:print(\\'Module Not Found\\')')\"", python_shell=True) %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
include:
- salt.minion
- salt.python3-influxdb
# Influx DB
influxconfdir:
@@ -57,6 +67,70 @@ append_so-influxdb_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-influxdb
# We have to make sure the influxdb module is the right version prior to state run since reload_modules is bugged
{% if PYTHONINFLUXVERSIONINSTALLED == PYTHONINFLUXVERSION %}
wait_for_influxdb:
http.query:
- name: 'https://{{MANAGER}}:8086/query?q=SHOW+DATABASES'
- ssl: True
- verify_ssl: False
- status: 200
- timeout: 30
- retry:
attempts: 5
interval: 60
telegraf_database:
influxdb_database.present:
- name: telegraf
- database: telegraf
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- sls: salt.python3-influxdb
- http: wait_for_influxdb
{% for rp in influxdb.retention_policies.keys() %}
{{rp}}_retention_policy:
influxdb_retention_policy.present:
- name: {{rp}}
- database: telegraf
- duration: {{influxdb.retention_policies[rp].duration}}
- shard_duration: {{influxdb.retention_policies[rp].shard_duration}}
- replication: 1
- default: {{influxdb.retention_policies[rp].get('default', 'False')}}
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_retention_policy.present_patch
- sls: salt.python3-influxdb
{% endfor %}
{% for dest_rp in influxdb.downsample.keys() %}
so_downsample_cq:
influxdb_continuous_query.present:
- name: so_downsample_cq
- database: telegraf
- query: SELECT mean(*) INTO "{{dest_rp}}".:MEASUREMENT FROM /.*/ GROUP BY time({{influxdb.downsample[dest_rp].resolution}}),*
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_continuous_query.present_patch
- sls: salt.python3-influxdb
{% endfor %}
{% endif %}
{% endif %}
{% else %}
@@ -65,4 +139,4 @@ append_so-influxdb_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -1,10 +0,0 @@
{ "attributes":
{
"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29",
"defaultRoute":"/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645",
"discover:sampleSize":"100",
"dashboard:defaultDarkTheme":true,
"theme:darkMode":true,
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"
}
}

View File

@@ -460,7 +460,7 @@
{"attributes":{"description":"","hits":0,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"highlightAll\":true,\"version\":true,\"query\":{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\",\"default_field\":\"*\",\"time_zone\":\"America/New_York\"}},\"language\":\"lucene\"},\"filter\":[]}"},"optionsJSON":"{\"darkTheme\":true,\"useMargins\":true}","panelsJSON":"[{\"version\":\"7.9.0\",\"gridData\":{\"w\":8,\"h\":48,\"x\":0,\"y\":0,\"i\":\"1\"},\"panelIndex\":\"1\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_0\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":32,\"h\":8,\"x\":16,\"y\":0,\"i\":\"2\"},\"panelIndex\":\"2\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_1\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":48,\"h\":24,\"x\":0,\"y\":96,\"i\":\"6\"},\"panelIndex\":\"6\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_2\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":20,\"h\":28,\"x\":8,\"y\":20,\"i\":\"7\"},\"panelIndex\":\"7\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_3\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":20,\"h\":28,\"x\":28,\"y\":20,\"i\":\"8\"},\"panelIndex\":\"8\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_4\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":16,\"h\":24,\"x\":0,\"y\":72,\"i\":\"9\"},\"panelIndex\":\"9\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_5\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":48,\"h\":24,\"x\":0,\"y\":48,\"i\":\"10\"},\"panelIndex\":\"10\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_6\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":20,\"h\":12,\"x\":8,\"y\":8,\"i\":\"11\"},\"panelIndex\":\"11\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_7\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":32,\"h\":24,\"x\":16,\"y\":72,\"i\":\"12\"},\"panelIndex\":\"12\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_8\"},{\
"version\":\"7.9.0\",\"gridData\":{\"w\":20,\"h\":12,\"x\":28,\"y\":8,\"i\":\"13\"},\"panelIndex\":\"13\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_9\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":48,\"h\":24,\"x\":0,\"y\":120,\"i\":\"14\"},\"panelIndex\":\"14\",\"embeddableConfig\":{\"columns\":[\"event_type\",\"source_ip\",\"source_port\",\"destination_ip\",\"destination_port\",\"_id\"],\"sort\":[\"@timestamp\",\"desc\"],\"enhancements\":{}},\"panelRefName\":\"panel_10\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":8,\"h\":8,\"x\":8,\"y\":0,\"i\":\"15\"},\"panelIndex\":\"15\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_11\"}]","timeRestore":false,"title":"z16.04 - Sysmon - Logs","version":1},"id":"6d189680-6d62-11e7-8ddb-e71eb260f4a3","migrationVersion":{"dashboard":"7.11.0"},"references":[{"id":"b3b449d0-3429-11e7-9d52-4f090484f59e","name":"panel_0","type":"visualization"},{"id":"8cfdeff0-6d6b-11e7-ad64-15aa071374a6","name":"panel_1","type":"visualization"},{"id":"0eb1fd80-6d70-11e7-b09b-f57b22df6524","name":"panel_2","type":"visualization"},{"id":"3072c750-6d71-11e7-b09b-f57b22df6524","name":"panel_3","type":"visualization"},{"id":"7bc74b40-6d71-11e7-b09b-f57b22df6524","name":"panel_4","type":"visualization"},{"id":"13ed0810-6d72-11e7-b09b-f57b22df6524","name":"panel_5","type":"visualization"},{"id":"3b6c92c0-6d72-11e7-b09b-f57b22df6524","name":"panel_6","type":"visualization"},{"id":"e09f6010-6d72-11e7-b09b-f57b22df6524","name":"panel_7","type":"visualization"},{"id":"29611940-6d75-11e7-b09b-f57b22df6524","name":"panel_8","type":"visualization"},{"id":"6b70b840-6d75-11e7-b09b-f57b22df6524","name":"panel_9","type":"visualization"},{"id":"248c1d20-6d6b-11e7-ad64-15aa071374a6","name":"panel_10","type":"search"},{"id":"AWDHHk1sxQT5EBNmq43Y","name":"panel_11","type":"visualization"}],"type":"dashboard","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQyLDRd"}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}"},"savedSearchRefName":"search_0","title":"SMB - Action (Pie Chart)","uiStateJSON":"{}","version":1,"visState":"{\"title\":\"SMB - Action (Pie Chart)\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"type\":\"pie\",\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"action.keyword\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}]}"},"id":"6f883480-3aad-11e7-8b17-0d8709b02c80","migrationVersion":{"visualization":"7.11.0"},"references":[{"id":"19849f30-3aab-11e7-8b17-0d8709b02c80","name":"search_0","type":"search"}],"type":"visualization","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQzLDRd"}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"},"title":"Security Onion - SSL - Subject","uiStateJSON":"{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}","version":1,"visState":"{\"title\":\"Security Onion - SSL - Subject\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":1,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"string\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\",\"parsedUrl\":{\"origin\":\"https://PLACEHOLDER\",\"pathname\":\"/kibana/app/kibana\",\"basePath\":\"/kibana\"}}},\"params\":{},\"label\":\"ssl.certificate.subject.keyword: Descending\",\"aggType\":\"terms\"}]},\"showToolbar\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"ssl.certificate.subject.keyword\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Subject\"}}]}"},"id":"6fccb600-75ec-11ea-9565-7315f4ee5cac","migrationVersion":{"visualization":"7.11.0"},"references":[{"id":"2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","name":"kibanaSavedObjectMeta.searchSourceJSON.index","type":"index-pattern"}],"type":"visualization","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQ0LDRd"}
{"attributes":{"buildNum":33984,"dashboard:defaultDarkTheme":true,"defaultIndex":"2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute":"/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize":"100","theme:darkMode":true,"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"id":"7.11.2","migrationVersion":{"config":"7.9.0"},"references":[],"type":"config","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQ1LDRd"}
{"attributes":{"buildNum":39457,"defaultIndex":"2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute":"/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize":100,"theme:darkMode":true,"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion":"7.12.1","id":"7.12.1","migrationVersion":{"config":"7.12.0"},"references":[],"type":"config","updated_at":"2021-04-29T21:42:52.430Z","version":"WzY3NTUsM10="}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"},"title":"Strelka - File - MIME Flavors","uiStateJSON":"{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}","version":1,"visState":"{\"title\":\"Strelka - File - MIME Flavors\",\"type\":\"table\",\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":0,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[]},\"showToolbar\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"file.flavors.mime.keyword\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\"}}]}"},"id":"70243970-772c-11ea-bee5-af7f7c7b8e05","migrationVersion":{"visualization":"7.11.0"},"references":[{"id":"2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","name":"kibanaSavedObjectMeta.searchSourceJSON.index","type":"index-pattern"}],"type":"visualization","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQ2LDRd"}
{"attributes":{"description":"","kibanaSavedObjectMeta":{"searchSourceJSON":"{\"filter\":[]}"},"savedSearchRefName":"search_0","title":"Modbus - Log Count","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","version":1,"visState":"{\"title\":\"Modbus - Log Count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"30\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\",\"bgFill\":\"#FB9E00\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}"},"id":"AWDG_9KpxQT5EBNmq4Oo","migrationVersion":{"visualization":"7.11.0"},"references":[{"id":"52dc9fe0-342e-11e7-9e93-53b62e1857b2","name":"search_0","type":"search"}],"type":"visualization","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQ3LDRd"}
{"attributes":{"description":"","hits":0,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"highlightAll\":true,\"version\":true,\"query\":{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\",\"default_field\":\"*\"}},\"language\":\"lucene\"},\"filter\":[]}"},"optionsJSON":"{\"darkTheme\":true,\"useMargins\":true}","panelsJSON":"[{\"version\":\"7.9.0\",\"gridData\":{\"w\":8,\"h\":56,\"x\":0,\"y\":0,\"i\":\"2\"},\"panelIndex\":\"2\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_0\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":32,\"h\":8,\"x\":16,\"y\":0,\"i\":\"3\"},\"panelIndex\":\"3\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_1\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":12,\"h\":24,\"x\":8,\"y\":32,\"i\":\"5\"},\"panelIndex\":\"5\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_2\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":12,\"h\":24,\"x\":20,\"y\":32,\"i\":\"6\"},\"panelIndex\":\"6\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_3\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":48,\"h\":24,\"x\":0,\"y\":56,\"i\":\"8\"},\"panelIndex\":\"8\",\"embeddableConfig\":{\"columns\":[\"source_ip\",\"source_port\",\"destination_ip\",\"destination_port\",\"uid\",\"_id\"],\"sort\":[\"@timestamp\",\"desc\"],\"enhancements\":{}},\"panelRefName\":\"panel_4\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":16,\"h\":24,\"x\":32,\"y\":32,\"i\":\"9\"},\"panelIndex\":\"9\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_5\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":8,\"h\":8,\"x\":8,\"y\":0,\"i\":\"10\"},\"panelIndex\":\"10\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_6\"},{\"version\":\"7.9.0\",\"gridData\":{\"w\":40,\"h\":24,\"x\":8,\"y\":8,\"i\":\"11\"},\"panelIndex\":\"11\",\"embeddableConfig\":{\"enhancements\":{}},\"panelRefName\":\"panel_7\"}]","timeRestore":false,"title":"z16.04 - Bro - 
Modbus","version":1},"id":"70c005f0-3583-11e7-a588-05992195c551","migrationVersion":{"dashboard":"7.11.0"},"references":[{"id":"b3b449d0-3429-11e7-9d52-4f090484f59e","name":"panel_0","type":"visualization"},{"id":"0d168a30-363f-11e7-a6f7-4f44d7bf1c33","name":"panel_1","type":"visualization"},{"id":"20eabd60-380b-11e7-a1cc-ebc6a7e70e84","name":"panel_2","type":"visualization"},{"id":"3c65f500-380b-11e7-a1cc-ebc6a7e70e84","name":"panel_3","type":"visualization"},{"id":"52dc9fe0-342e-11e7-9e93-53b62e1857b2","name":"panel_4","type":"search"},{"id":"178209e0-6e1b-11e7-b553-7f80727663c1","name":"panel_5","type":"visualization"},{"id":"AWDG_9KpxQT5EBNmq4Oo","name":"panel_6","type":"visualization"},{"id":"453f8b90-4a58-11e8-9b0a-f1d33346f773","name":"panel_7","type":"visualization"}],"type":"dashboard","updated_at":"2021-03-19T14:35:12.119Z","version":"WzcwMTQ4LDRd"}

View File

@@ -90,3 +90,7 @@ PassThroughPattern: (repo\.securityonion\.net:443|download\.docker\.com:443|mirr
# MaxDlSpeed: 500
# MaxInresponsiveDlSize: 64000
# BadRedirDetectMime: text/html
{% set proxy = salt['pillar.get']('manager:proxy') -%}
{% if proxy -%}
Proxy: {{ proxy }}
{% endif -%}

View File

@@ -18,7 +18,6 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set managerproxy = salt['pillar.get']('global:managerupdate', '0') %}
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
socore_own_saltstack:
@@ -35,8 +34,6 @@ socore_own_saltstack:
- mode: 750
- replace: False
{% if managerproxy == 1 %}
# Create the directories for apt-cacher-ng
aptcacherconfdir:
file.directory:
@@ -60,11 +57,12 @@ aptcacherlogdir:
- makedirs: true
# Copy the config
acngcopyconf:
file.managed:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- source: salt://manager/files/acng/acng.conf
- template: jinja
- show_changes: False
# Install the apt-cacher-ng container
so-aptcacherng:
@@ -84,8 +82,6 @@ append_so-aptcacherng_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-aptcacherng
{% endif %}
strelka_yara_update_old_1:
cron.absent:
- user: root

View File

@@ -31,25 +31,32 @@ name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://repo.securityonion.net/file/securityonion-repo/epel/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/RPM-GPG-KEY-EPEL-7
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://repo.securityonion.net/file/securityonion-repo/docker-ce-stable
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/docker.pub
gpgkey=file:///etc/pki/rpm-gpg/docker.pub
[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack/
baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack3003/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/SALTSTACK-GPG-KEY.pub
[saltstack3003]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack3003/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
[wazuh_repo]
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
enabled=1
name=Wazuh repository
baseurl=https://repo.securityonion.net/file/securityonion-repo/wazuh_repo/
@@ -57,8 +64,15 @@ protect=1
[wazuh4_repo]
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
enabled=1
name=Wazuh repository
baseurl=https://repo.securityonion.net/file/securityonion-repo/wazuh4_repo/
protect=1
protect=1
[securityonion]
name=Security Onion Repo repo
baseurl=https://repo.securityonion.net/file/securityonion-repo/securityonion/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub

View File

@@ -42,7 +42,14 @@ gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/docker.pub
[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack/
baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack3003/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
[saltstack3003]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack3003/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
@@ -60,5 +67,12 @@ gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
enabled=1
name=Wazuh repository
baseurl=https://repo.securityonion.net/file/securityonion-repo/wazuh4_repo/
protect=1
baseurl=http://repocache.securityonion.net/file/securityonion-repo/wazuh4_repo/
protect=1
[securityonion]
name=Security Onion Repo
baseurl=http://repocache.securityonion.net/file/securityonion-repo/securityonion/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub

View File

@@ -12,7 +12,7 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
clean_requirements_on_remove=1
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') -%}
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import']) and ( salt['pillar.get']('global:managerupdate', '0') or salt['pillar.get']('patch:os:source', 'direct') == 'manager' ) -%}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% elif proxy -%}
proxy={{ proxy }}

83
salt/repo/client/init.sls Normal file
View File

@@ -0,0 +1,83 @@
{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
{% from 'repo/client/map.jinja' import REPOPATH with context %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% set managerupdates = salt['pillar.get']('global:managerupdate', '0') %}
{% set role = grains.id.split('_') | last %}

# from airgap state
{% if ISAIRGAP and grains.os == 'CentOS' %}
{%   set MANAGER = salt['grains.get']('master') %}

# Airgap nodes cannot reach the internet; point yum at the manager's local repo.
airgapyum:
  file.managed:
    - name: /etc/yum/yum.conf
    - source: salt://repo/client/files/centos/airgap/yum.conf

airgap_repo:
  pkgrepo.managed:
    - humanname: Airgap Repo
    - baseurl: https://{{ MANAGER }}/repo
    - gpgcheck: 0
    - sslverify: 0
{% endif %}

# from airgap and common
# Remove distro-default repo files (list comes from map.jinja) so that only
# Security Onion managed repos remain; any removal triggers a metadata clean.
{% if ABSENTFILES|length > 0%}
{%   for file in ABSENTFILES %}
{{ file }}:
  file.absent:
    - name: {{ REPOPATH }}{{ file }}
    - onchanges_in:
      - module: cleanyum
{%   endfor %}
{% endif %}

# from common state
# Remove default Repos
{% if grains['os'] == 'CentOS' %}

# A corrupt rpmdb breaks every subsequent pkg state; repair it first.
repair_yumdb:
  cmd.run:
    - name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
    - onlyif:
      - 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'

# Keys must land in /etc/pki/rpm-gpg (hyphen) to match the
# gpgkey=file:///etc/pki/rpm-gpg/... URLs in the managed .repo files;
# the previous /etc/pki/rpm_gpg (underscore) path broke gpgcheck.
crsynckeys:
  file.recurse:
    - name: /etc/pki/rpm-gpg
    - source: salt://repo/client/files/centos/keys/

{% if not ISAIRGAP %}
# Managers (and roles that bypass the manager cache) talk to the public repo;
# everything else goes through the manager's apt-cacher-ng proxy repo.
# NOTE(review): managerupdates defaults to the string '0', so the int
# comparison `managerupdates == 0` only matches when the pillar is set to an
# integer 0 — confirm the intended pillar type.
crsecurityonionrepo:
  file.managed:
  {% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
    - name: /etc/yum.repos.d/securityonion.repo
    - source: salt://repo/client/files/centos/securityonion.repo
  {% else %}
    - name: /etc/yum.repos.d/securityonioncache.repo
    - source: salt://repo/client/files/centos/securityonioncache.repo
  {% endif %}
    - mode: 644

yumconf:
  file.managed:
    - name: /etc/yum.conf
    - source: salt://repo/client/files/centos/yum.conf.jinja
    - mode: 644
    - template: jinja
    - show_changes: False
{% endif %}

# Re-fetch repo metadata whenever any repo definition above changed.
cleanyum:
  module.run:
    - pkg.clean_metadata: []
    - onchanges:
  {% if ISAIRGAP %}
      - file: airgapyum
      - pkgrepo: airgap_repo
  {% else %}
      - file: crsecurityonionrepo
      - file: yumconf
  {% endif %}

{% endif %}

View File

@@ -0,0 +1,26 @@
{# Per-OS map for repo/client states: REPOPATH is where repo definitions live,
   ABSENTFILES lists distro-default repo files to delete so that only the
   Security Onion managed repos remain. #}
{% if grains.os == 'CentOS' %}
{% set REPOPATH = '/etc/yum.repos.d/' %}
{% set ABSENTFILES = [
  'CentOS-Base.repo',
  'CentOS-CR.repo',
  'CentOS-Debuginfo.repo',
  'CentOS-fasttrack.repo',
  'CentOS-Media.repo',
  'CentOS-Sources.repo',
  'CentOS-Vault.repo',
  'CentOS-x86_64-kernel.repo',
  'docker-ce.repo',
  'epel.repo',
  'epel-testing.repo',
  'saltstack.repo',
  'wazuh.repo'
]
%}
{% elif grains.os == 'Ubuntu' %}
{# Ubuntu uses apt source lists; nothing is removed by default. #}
{% set REPOPATH = '/etc/apt/sources.list.d/' %}
{% set ABSENTFILES = [] %}
{% endif %}

View File

@@ -0,0 +1,4 @@
60c60
< database, name, query, resample_time, coverage_period
---
> database, name, query, resample_time, coverage_period, **client_args

View File

@@ -0,0 +1,16 @@
38c38
< hours = int(duration.split("h"))
---
> hours = int(duration.split("h")[0])
52c52
< def present(name, database, duration="7d", replication=1, default=False, **client_args):
---
> def present(name, database, duration="7d", replication=1, default=False, shard_duration="1d", **client_args):
77c77
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args
119c119
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args

View File

@@ -0,0 +1,16 @@
427c427
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
462c462
< client.create_retention_policy(name, duration, replication, database, default)
---
> client.create_retention_policy(name, duration, replication, database, default, shard_duration)
468c468
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
504c504
< client.alter_retention_policy(name, database, duration, replication, default)
---
> client.alter_retention_policy(name, database, duration, replication, default, shard_duration)

View File

@@ -0,0 +1,3 @@
# Ensure GNU 'patch' is installed; the file.patch states (e.g. the influxdb
# salt-module patches) require it on the minion.
patch_package:
  pkg.installed:
    - name: patch

View File

@@ -3,28 +3,33 @@
{% if grains.os == 'Ubuntu' %}
{% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %}
{% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %}
{% set PYTHONINSTALLER = 'pip' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %}
{% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %}
{% set PYTHONINSTALLER = 'pkg' %}
{% endif %}
{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% if grains.os|lower == 'ubuntu' %}
{% set COMMON = 'salt-common' %}
{% elif grains.os|lower in ['centos', 'redhat'] %}
{% set COMMON = 'salt' %}
{% endif %}
{% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['centos', 'redhat'] %}
{% if ISAIRGAP is sameas true %}
{% set UPGRADECOMMAND = 'yum clean all && yum versionlock delete "salt-*" && /usr/sbin/bootstrap-salt.sh -X -s 120 -r -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %}
{% else %}
{% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %}
{% endif %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -X -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os|lower == 'ubuntu' %}
{% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && apt-mark hold salt-common && apt-mark hold salt-minion' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
version: 3002.5
version: 3003

View File

@@ -1,17 +1,16 @@
{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'salt/map.jinja' import COMMON with context %}
include:
- salt.minion
salt_master_package:
pkg.installed:
- pkgs:
- {{ COMMON }}
- salt-master
- hold: True
{% if SALTNOTHELD == 1 %}
hold_salt_master_package:
module.run:
- pkg.hold:
- name: salt-master
{% endif %}
salt_master_service:
service.running:

View File

@@ -2,5 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
version: 3002.5
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
version: 3003
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

View File

@@ -1,10 +1,27 @@
{% from 'salt/map.jinja' import COMMON with context %}
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'salt/map.jinja' import SALTPACKAGES %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}
include:
- salt
- salt.helper-packages
- systemd.reload
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
{% if SALTNOTHELD | int == 0 %}
unhold_salt_packages:
module.run:
- pkg.unhold:
- pkgs:
{% for package in SALTPACKAGES %}
- {{ package }}
{% endfor %}
{% endif %}
install_salt_minion:
cmd.run:
@@ -13,15 +30,19 @@ install_salt_minion:
exec 1>&- # close stdout
exec 2>&- # close stderr
nohup /bin/sh -c '{{ UPGRADECOMMAND }}' &
- onlyif: test "{{INSTALLEDSALTVERSION}}" != "{{SALTVERSION}}"
{% endif %}
salt_minion_package:
pkg.installed:
- pkgs:
- {{ COMMON }}
- salt-minion
- hold: True
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
{% if SALTNOTHELD | int == 1 %}
hold_salt_packages:
module.run:
- pkg.hold:
- pkgs:
{% for package in SALTPACKAGES %}
- {{ package }}
{% endfor %}
{% endif %}
set_log_levels:
file.append:
@@ -32,8 +53,26 @@ set_log_levels:
- listen_in:
- service: salt_minion_service
salt_minion_service_unit_file:
file.managed:
- name: /etc/systemd/system/multi-user.target.wants/salt-minion.service
- source: salt://salt/service/salt-minion.service.jinja
- template: jinja
- defaults:
service_start_delay: {{ service_start_delay }}
- onchanges_in:
- module: systemd_reload
- listen_in:
- service: salt_minion_service
{% endif %}
# this has to be outside the if statement above since there are <requisite>_in calls to this state
salt_minion_service:
service.running:
- name: salt-minion
- enable: True
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
patch_pkg:
pkg.installed:
- name: patch

View File

@@ -0,0 +1,70 @@
{% from "salt/map.jinja" import SALT_STATE_CODE_PATH with context %}
{% from "salt/map.jinja" import SALT_MODULE_CODE_PATH with context %}
{% from "salt/map.jinja" import PYTHON3INFLUX with context %}
{% from "salt/map.jinja" import PYTHON3INFLUXDEPS with context %}
{% from "salt/map.jinja" import PYTHONINSTALLER with context %}

include:
  - salt.helper-packages

# Install the influxdb python client and its dependencies with the installer
# selected in salt/map.jinja (pip on Ubuntu, pkg on CentOS).
python3_influxdb_dependencies:
  {{PYTHONINSTALLER}}.installed:
    - pkgs: {{ PYTHON3INFLUXDEPS }}

python3_influxdb:
  {{PYTHONINSTALLER}}.installed:
    - name: {{ PYTHON3INFLUX }}

# We circumvent the file.patch state putting ERROR in the log by using the unless and file.touch below
# https://github.com/saltstack/salt/pull/47010 and https://github.com/saltstack/salt/issues/52329
# Each patch below is guarded by a sentinel file under /opt/so/state/: the
# patch runs only while the sentinel is absent, and the sentinel is touched
# whenever the patch applies, so the patch is attempted exactly once.

#https://github.com/saltstack/salt/issues/59766
influxdb_continuous_query.present_patch:
  file.patch:
    - name: {{ SALT_STATE_CODE_PATH }}/influxdb_continuous_query.py
    - source: salt://salt/files/influxdb_continuous_query.py.patch
    - require:
      - {{PYTHONINSTALLER}}: python3_influxdb
      - pkg: patch_package
    - unless: ls /opt/so/state/influxdb_continuous_query.py.patched

influxdb_continuous_query.py.patched:
  file.touch:
    - name: /opt/so/state/influxdb_continuous_query.py.patched
    - onchanges:
      - file: influxdb_continuous_query.present_patch

#https://github.com/saltstack/salt/issues/59761
influxdb_retention_policy.present_patch:
  file.patch:
    - name: {{ SALT_STATE_CODE_PATH }}/influxdb_retention_policy.py
    - source: salt://salt/files/influxdb_retention_policy.py.patch
    - require:
      - {{PYTHONINSTALLER}}: python3_influxdb
      - pkg: patch_package
    - unless: ls /opt/so/state/influxdb_retention_policy.py.patched

influxdb_retention_policy.py.patched:
  file.touch:
    - name: /opt/so/state/influxdb_retention_policy.py.patched
    - onchanges:
      - file: influxdb_retention_policy.present_patch

# We should be able to set reload_modules: True in this state in order to tell salt to reload its python modules due to us possibly installing
# and possibly modifying modules in this state. This is bugged according to https://github.com/saltstack/salt/issues/24925
influxdbmod.py_shard_duration_patch:
  file.patch:
    - name: {{ SALT_MODULE_CODE_PATH }}/influxdbmod.py
    - source: salt://salt/files/influxdbmod.py.patch
    - require:
      - {{PYTHONINSTALLER}}: python3_influxdb
      - pkg: patch_package
    - unless: ls /opt/so/state/influxdbmod.py.patched

influxdbmod.py.patched:
  file.touch:
    - name: /opt/so/state/influxdbmod.py.patched
    - onchanges:
      - file: influxdbmod.py_shard_duration_patch

View File

@@ -0,0 +1,15 @@
[Unit]
Description=The Salt Minion
Documentation=man:salt-minion(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target salt-master.service
[Service]
KillMode=process
Type=notify
NotifyAccess=all
LimitNOFILE=8192
ExecStart=/usr/bin/salt-minion
ExecStartPre=/bin/sleep {{ salt['pillar.get']('salt:minion:service_start_delay', service_start_delay) }}
[Install]
WantedBy=multi-user.target

View File

@@ -1,5 +1,6 @@
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description') %}
{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description', '') %}
{%- set MODEL = salt['grains.get']('sosmodel', '') %}
{%- set ADDRESS = salt['pillar.get']('sensoroni:node_address') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
{%- set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) %}
@@ -9,14 +10,16 @@
{%- else %}
{%- set STENODEFAULT = False %}
{%- endif %}
{%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %}
{%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"info",
"agent": {
"nodeId": "{{ grains.host | lower }}",
"role": "{{ grains.role }}",
"description": "{{ DESCRIPTION }}",
"address": "{{ ADDRESS }}",
"model": "{{ MODEL }}",
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,

View File

@@ -1,7 +1,7 @@
[
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
"links": [
"/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
"/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
@@ -18,6 +18,10 @@
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
"links": [
"/cyberchef/#input={value|base64}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"
@@ -26,4 +30,4 @@
"links": [
"https://www.virustotal.com/gui/search/{value}"
]}
]
]

View File

@@ -1,7 +1,7 @@
[
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
"links": [
"/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
"/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
@@ -18,6 +18,10 @@
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
"links": [
"/cyberchef/#input={value|base64}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"
@@ -26,4 +30,4 @@
"links": [
"https://www.virustotal.com/gui/search/{value}"
]}
]
]

View File

@@ -34,7 +34,7 @@
{ "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"},
{ "name": "HTTP", "description": "HTTP grouped by method and user agent", "query": "event.dataset:http | groupby http.method http.useragent"},
{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND (file.resp_mime_types:dosexec OR file.resp_mime_types:executable) | groupby http.virtual_host"},
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator.keyword"},
{ "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},

View File

@@ -1,6 +1,6 @@
## Getting Started
New to Security Onion 2? Check out the [Online Help](/docs/) and [Cheatsheet](/docs/cheatsheet.pdf) to learn how to best utilize Security Onion to hunt for evil! Find them in the upper-right menu.
New to Security Onion 2? Check out the [Online Help](/docs/) and [Cheatsheet](/docs/cheatsheet.pdf) to learn how to best utilize Security Onion to hunt for evil! Find them in the upper-right menu. Also, watch our free Security Onion 2 Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website.
If you're ready to dive-in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. Or navigate to the [Hunt](/#/hunt) interface to hunt for evil that the alerts might have missed!
@@ -10,16 +10,18 @@ The release notes have moved to the upper-right menu. Click on the [What's New](
## Customize This Space
Make this area your own by customizing the content. The content is stored in the `motd.md` file, which uses the common Markdown (.md) format. Visit [mardownguide.org](https://www.markdownguide.org/) to learn more about the simple Markdown format.
Make this area your own by customizing the content. The content is stored in the `motd.md` file, which uses the common Markdown (.md) format. Visit [markdownguide.org](https://www.markdownguide.org/) to learn more about the simple Markdown format.
To customize this content, login to the manager via SSH and execute the following command:
```bash
cp -f /opt/so/saltstack/default/salt/soc/files/soc/motd.md /opt/so/saltstack/local/salt/soc/files/soc/motd.md
sudo cp /opt/so/saltstack/default/salt/soc/files/soc/motd.md /opt/so/saltstack/local/salt/soc/files/soc/
```
Now, edit the new file as desired. Finally, run this command:
and edit the new file as desired.
Finally, run this command:
```bash
salt-call state.apply soc queue=True
sudo so-soc-restart
```

View File

@@ -53,6 +53,17 @@
"cacheMs": {{ ES_FIELDCAPS_CACHE }},
"verifyCert": false
},
"influxdb": {
{%- if grains['role'] in ['so-import'] %}
"hostUrl": "",
{%- else %}
"hostUrl": "https://{{ MANAGERIP }}:8086",
{%- endif %}
"token": "",
"org": "",
"bucket": "telegraf",
"verifyCert": false
},
"sostatus": {
"refreshIntervalMs": 30000,
"offlineThresholdMs": 900000

View File

@@ -8,8 +8,8 @@
[es]
es_url = https://{{MANAGER}}:9200
es_ip = {{MANAGER}}
es_user = YOURESUSER
es_pass = YOURESPASS
es_user =
es_pass =
es_index_pattern = so-*
es_verifycert = no

View File

@@ -68,8 +68,9 @@ removeesp12dir:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Create a cert for the talking to influxdb
/etc/pki/influxdb.crt:
@@ -78,6 +79,7 @@ removeesp12dir:
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- CN: {{ manager }}
- subjectAltName: DNS:{{ HOSTNAME }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -86,8 +88,9 @@ removeesp12dir:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/influxdb.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
influxkeyperms:
file.managed:
@@ -111,8 +114,9 @@ influxkeyperms:
- x509: /etc/pki/redis.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
/etc/pki/redis.crt:
x509.certificate_managed:
@@ -128,8 +132,9 @@ influxkeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
rediskeyperms:
file.managed:
@@ -153,8 +158,9 @@ rediskeyperms:
- x509: /etc/pki/filebeat.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
@@ -175,8 +181,9 @@ rediskeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
@@ -232,8 +239,9 @@ fbcrtlink:
- x509: /etc/pki/registry.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Create a cert for the docker registry
/etc/pki/registry.crt:
@@ -250,8 +258,9 @@ fbcrtlink:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/registry.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
regkeyperms:
file.managed:
@@ -273,8 +282,9 @@ regkeyperms:
- x509: /etc/pki/minio.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Create a cert for minio
/etc/pki/minio.crt:
@@ -291,8 +301,9 @@ regkeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
miniokeyperms:
file.managed:
@@ -315,8 +326,9 @@ miniokeyperms:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
/etc/pki/elasticsearch.crt:
x509.certificate_managed:
@@ -332,8 +344,9 @@ miniokeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
@@ -366,8 +379,9 @@ elasticp12perms:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
@@ -385,8 +399,9 @@ elasticp12perms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
msslkeyperms:
file.managed:
@@ -409,8 +424,9 @@ msslkeyperms:
- x509: /etc/pki/fleet.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
/etc/pki/fleet.crt:
x509.certificate_managed:
@@ -425,8 +441,9 @@ msslkeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
fleetkeyperms:
file.managed:
@@ -456,8 +473,9 @@ fbcertdir:
- x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
/opt/so/conf/filebeat/etc/pki/filebeat.crt:
@@ -478,8 +496,9 @@ fbcertdir:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /opt/so/conf/filebeat/etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
@@ -520,8 +539,9 @@ chownfilebeatp8:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
@@ -539,8 +559,9 @@ chownfilebeatp8:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
msslkeyperms:
file.managed:
@@ -563,8 +584,9 @@ msslkeyperms:
- x509: /etc/pki/fleet.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
/etc/pki/fleet.crt:
x509.certificate_managed:
@@ -579,8 +601,9 @@ msslkeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
fleetkeyperms:
file.managed:
@@ -606,8 +629,9 @@ fleetkeyperms:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
/etc/pki/elasticsearch.crt:
x509.certificate_managed:
@@ -623,8 +647,9 @@ fleetkeyperms:
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:

View File

@@ -1,6 +1,6 @@
#!/bin/bash
# Gzip the eve logs
find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' | sort -t $'\t' -g | head -n -1 | cut -d $'\t' -f 2 | xargs nice gzip
find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' | sort -t $'\t' -g | head -n -1 | cut -d $'\t' -f 2 | xargs nice gzip >/dev/null 2>&1
# TODO Add stats log

View File

@@ -1,4 +1,18 @@
{% if salt['pillar.get']('sensor:suriprocs') %}
{% if salt['pillar.get']('sensor:suripins') %}
{% load_yaml as cpu_affinity%}
cpu-affinity:
- management-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings
- receive-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings
- worker-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ]
mode: "exclusive"
threads: {{ salt['pillar.get']('sensor:suripins')|length }}
prio:
default: "high"
{% endload %}
{% elif salt['pillar.get']('sensor:suriprocs') %}
{% load_yaml as cpu_affinity%}
cpu-affinity:
- management-cpu-set:
@@ -15,18 +29,4 @@ cpu-affinity:
high: [ 3 ]
default: "high"
{% endload %}
{% elif salt['pillar.get']('sensor:suripins') %}
{% load_yaml as cpu_affinity%}
cpu-affinity:
- management-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings
- receive-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings
- worker-cpu-set:
cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ]
mode: "exclusive"
threads: {{ salt['pillar.get']('sensor:suripins')|length }}
prio:
default: "high"
{% endload %}
{% endif %}
{% endif %}

3
salt/systemd/reload.sls Normal file
View File

@@ -0,0 +1,3 @@
systemd_reload:
module.run:
- service.systemctl_reload: []

View File

@@ -17,6 +17,7 @@
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
{%- set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
# Global tags can be specified here in key="value" format.
[global_tags]
@@ -663,14 +664,45 @@
# # Read metrics from one or more commands that can output to stdout
[[inputs.exec]]
commands = [
"/scripts/sostatus.sh"
]
data_format = "influx"
timeout = "15s"
interval = "60s"
# ## Commands array
{% if grains['role'] in ['so-manager', 'so-managersearch'] %}
{% if grains['role'] in ['so-manager'] %}
[[inputs.exec]]
commands = [
"/scripts/redis.sh",
"/scripts/influxdbsize.sh",
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
## Timeout for each command to complete.
timeout = "15s"
{% elif grains['role'] in ['so-managersearch'] %}
[[inputs.exec]]
commands = [
"/scripts/redis.sh",
"/scripts/influxdbsize.sh",
"/scripts/eps.sh",
"/scripts/raid.sh"
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
## Timeout for each command to complete.
timeout = "15s"
{% elif grains['role'] in ['so-node'] %}
[[inputs.exec]]
commands = [
"/scripts/eps.sh",
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
## Timeout for each command to complete.
@@ -686,7 +718,8 @@
"/scripts/zeekcaptureloss.sh",
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/raid.sh"
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -702,7 +735,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
"/scripts/raid.sh"
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -720,7 +754,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
"/scripts/raid.sh"
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -737,7 +772,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/influxdbsize.sh",
"/scripts/raid.sh"
"/scripts/raid.sh",
"/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"

View File

@@ -72,6 +72,8 @@ so-telegraf:
- /opt/so/conf/telegraf/scripts:/scripts:ro
- /opt/so/log/stenographer:/var/log/stenographer:ro
- /opt/so/log/suricata:/var/log/suricata:ro
- /opt/so/log/raid:/var/log/raid:ro
- /opt/so/log/sostatus:/var/log/sostatus:ro
- watch:
- file: tgrafconf
- file: tgrafsyncscripts

View File

@@ -0,0 +1,48 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
if [ ! "$THEGREP" ]; then
PREVCOUNTFILE='/tmp/beatseps.txt'
EVENTCOUNTCURRENT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.acked')"
FAILEDEVENTCOUNT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.failed')"
if [ ! -z "$EVENTCOUNTCURRENT" ]; then
if [ -f "$PREVCOUNTFILE" ]; then
EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
exit 0
fi
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
# the division by 30 is because the agent interval is 30 seconds
EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
if [ "$EVENTS" -lt 0 ]; then
EVENTS=0
fi
echo "fbstats eps=${EVENTS%%.*},failed=$FAILEDEVENTCOUNT"
fi
else
exit 0
fi

View File

@@ -15,15 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=checkfiles
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
FILES=$(ls -1x /host/nsm/faf/complete/ | wc -l)
if [ ! "$THEGREP" ]; then
echo "faffiles files=$FILES"
FILES=$(ls -1x /host/nsm/strelka/unprocessed | wc -l)
echo "faffiles files=$FILES"
else
exit 0
fi

View File

@@ -15,36 +15,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=eps
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
PREVCOUNTFILE='/tmp/eps.txt'
EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.events.in')"
if [ ! "$THEGREP" ]; then
if [ ! -z "$EVENTCOUNTCURRENT" ]; then
PREVCOUNTFILE='/tmp/eps.txt'
EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.events.in')"
if [ -f "$PREVCOUNTFILE" ]; then
EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
if [ ! -z "$EVENTCOUNTCURRENT" ]; then
if [ -f "$PREVCOUNTFILE" ]; then
EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
exit 0
fi
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
# the division by 30 is because the agent interval is 30 seconds
EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
if [ "$EVENTS" -lt 0 ]; then
EVENTS=0
fi
echo "consumptioneps eps=${EVENTS%%.*}"
fi
else
exit 0
fi
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
# the division by 30 is because the agent interval is 30 seconds
EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
if [ "$EVENTS" -lt 0 ]; then
EVENTS=0
fi
echo "esteps eps=${EVENTS%%.*}"
fi
exit 0

View File

@@ -15,35 +15,30 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=helixeps
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
PREVCOUNTFILE='/tmp/helixevents.txt'
EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.pipelines.helix.events.out')"
if [ ! "$THEGREP" ]; then
if [ ! -z "$EVENTCOUNTCURRENT" ]; then
PREVCOUNTFILE='/tmp/helixevents.txt'
EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.pipelines.helix.events.out')"
if [ ! -z "$EVENTCOUNTCURRENT" ]; then
if [ -f "$PREVCOUNTFILE" ]; then
EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
exit 0
fi
if [ -f "$PREVCOUNTFILE" ]; then
EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
if [ "$EVENTS" -lt 0 ]; then
EVENTS=0
fi
echo "helixeps eps=${EVENTS%%.*}"
fi
else
exit 0
fi
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
if [ "$EVENTS" -lt 0 ]; then
EVENTS=0
fi
echo "helixeps eps=${EVENTS%%.*}"
fi
exit 0
fi

View File

@@ -15,15 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=influxsize
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'})
if [ ! "$THEGREP" ]; then
echo "influxsize kbytes=$INFLUXSIZE"
INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'})
echo "influxsize kbytes=$INFLUXSIZE"
else
exit 0
fi

View File

@@ -15,18 +15,16 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=oldpcap
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
# Get the data
OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'})
DATE=$(date +%s)
AGE=$(($DATE - $OLDPCAP))
if [ ! "$THEGREP" ]; then
echo "pcapage seconds=$AGE"
# Get the data
OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'})
DATE=$(date +%s)
AGE=$(($DATE - $OLDPCAP))
echo "pcapage seconds=$AGE"
else
exit 0
fi

View File

@@ -15,19 +15,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=raid
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
RAIDLOG=/var/log/raid/status.log
RAIDSTATUS=$(cat /var/log/raid/status.log)
if [ -f "$RAIDLOG" ]; then
echo "raid raidstatus=$RAIDSTATUS "
if [ ! "$THEGREP" ]; then
if [ -f "$RAIDLOG" ]; then
echo "raid $RAIDSTATUS"
else
exit 0
fi
else
exit 0
fi

View File

@@ -15,17 +15,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
APP=redis
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
if [ ! "$THEGREP" ]; then
UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')
UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')
echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
else
exit 0
fi

View File

@@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
if [ ! "$THEGREP" ]; then
SOSTATUSLOG=/var/log/sostatus/status.log
SOSTATUSSTATUS=$(cat /var/log/sostatus/status.log)
if [ -f "$SOSTATUSLOG" ]; then
echo "sostatus status=$SOSTATUSSTATUS"
else
exit 0
fi
else
exit 0
fi

View File

@@ -15,31 +15,29 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=stenoloss
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
TSFILE=/var/log/telegraf/laststenodrop.log
if [ -f "$TSFILE" ]; then
LASTTS=$(cat $TSFILE)
if [ ! "$THEGREP" ]; then
TSFILE=/var/log/telegraf/laststenodrop.log
if [ -f "$TSFILE" ]; then
LASTTS=$(cat $TSFILE)
else
LASTTS=0
fi
# Get the data
LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
if [[ "$CURRENTTS" != "$LASTTS" ]]; then
DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
echo $CURRENTTS > $TSFILE
else
DROP=0
fi
echo "stenodrop drop=$DROP"
else
LASTTS=0
fi
# Get the data
LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
if [[ "$CURRENTTS" != "$LASTTS" ]]; then
DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
echo $CURRENTTS > $TSFILE
else
DROP=0
fi
echo "stenodrop drop=$DROP"
exit 0
fi

View File

@@ -16,37 +16,33 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
APP=suriloss
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4)
CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l)
if [ ! "$THEGREP" ]; then
if [ $CHECKIT == 2 ]; then
declare RESULT=($SURILOG)
SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4)
CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l)
CURRENTDROP=${RESULT[4]}
PASTDROP=${RESULT[14]}
DROPPED=$((CURRENTDROP - PASTDROP))
if [ $DROPPED == 0 ]; then
LOSS=0
echo "suridrop drop=0"
else
CURRENTPACKETS=${RESULT[9]}
PASTPACKETS=${RESULT[19]}
TOTALCURRENT=$((CURRENTPACKETS + CURRENTDROP))
TOTALPAST=$((PASTPACKETS + PASTDROP))
TOTAL=$((TOTALCURRENT - TOTALPAST))
if [ $CHECKIT == 2 ]; then
declare RESULT=($SURILOG)
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "suridrop drop=$LOSS"
fi
CURRENTDROP=${RESULT[4]}
PASTDROP=${RESULT[14]}
DROPPED=$((CURRENTDROP - PASTDROP))
if [ $DROPPED == 0 ]; then
LOSS=0
echo "suridrop drop=0"
else
CURRENTPACKETS=${RESULT[9]}
PASTPACKETS=${RESULT[19]}
TOTALCURRENT=$((CURRENTPACKETS + CURRENTDROP))
TOTALPAST=$((PASTPACKETS + PASTDROP))
TOTAL=$((TOTALCURRENT - TOTALPAST))
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "suridrop drop=$LOSS"
fi
fi
else
echo "suridrop drop=0"
fi

View File

@@ -18,35 +18,33 @@
# This script returns the average of all the workers average capture loss to telegraf / influxdb in influx format include nanosecond precision timestamp
APP=zeekcaploss
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
if [ -d "/host/nsm/zeek/spool/logger" ]; then
WORKERS={{ salt['pillar.get']('sensor:zeek_lbprocs', salt['pillar.get']('sensor:zeek_pins') | length) }}
ZEEKLOG=/host/nsm/zeek/spool/logger/capture_loss.log
elif [ -d "/host/nsm/zeek/spool/zeeksa" ]; then
WORKERS=1
ZEEKLOG=/host/nsm/zeek/spool/zeeksa/capture_loss.log
else
echo 'Zeek capture_loss.log not found' >/dev/stderr
exit 2
fi
if [ ! "$THEGREP" ]; then
LASTCAPTURELOSSLOG=/var/log/telegraf/lastcaptureloss.txt
if [ -f "$ZEEKLOG" ]; then
CURRENTTS=$(tail -1 $ZEEKLOG | jq .ts | sed 's/"//g')
if [ -f "$LASTCAPTURELOSSLOG" ]; then
LASTTS=$(cat $LASTCAPTURELOSSLOG)
if [[ "$LASTTS" != "$CURRENTTS" ]]; then
LOSS=$(tail -$WORKERS $ZEEKLOG | awk -F, '{print $NF}' | sed 's/}//' | awk -v WORKERS=$WORKERS -F: '{LOSS += $2 / WORKERS} END { print LOSS}')
echo "zeekcaptureloss loss=$LOSS"
if [ -d "/host/nsm/zeek/spool/logger" ]; then
WORKERS={{ salt['pillar.get']('sensor:zeek_lbprocs', salt['pillar.get']('sensor:zeek_pins') | length) }}
ZEEKLOG=/host/nsm/zeek/spool/logger/capture_loss.log
elif [ -d "/host/nsm/zeek/spool/zeeksa" ]; then
WORKERS=1
ZEEKLOG=/host/nsm/zeek/spool/zeeksa/capture_loss.log
else
echo 'Zeek capture_loss.log not found' >/dev/stderr
exit 2
fi
fi
echo "$CURRENTTS" > $LASTCAPTURELOSSLOG
LASTCAPTURELOSSLOG=/var/log/telegraf/lastcaptureloss.txt
if [ -f "$ZEEKLOG" ]; then
CURRENTTS=$(tail -1 $ZEEKLOG | jq .ts | sed 's/"//g')
if [ -f "$LASTCAPTURELOSSLOG" ]; then
LASTTS=$(cat $LASTCAPTURELOSSLOG)
if [[ "$LASTTS" != "$CURRENTTS" ]]; then
LOSS=$(tail -$WORKERS $ZEEKLOG | awk -F, '{print $NF}' | sed 's/}//' | awk -v WORKERS=$WORKERS -F: '{LOSS += $2 / WORKERS} END { print LOSS}')
echo "zeekcaptureloss loss=$LOSS"
fi
fi
echo "$CURRENTTS" > $LASTCAPTURELOSSLOG
fi
else
exit 0
fi

View File

@@ -17,34 +17,32 @@
# This script returns the packets dropped by Zeek, but it isn't a percentage. $LOSS * 100 would be the percentage
APP=zeekloss
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists , exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2)
declare RESULT=($ZEEKLOG)
CURRENTDROP=${RESULT[3]}
# zeek likely not running if this is true
if [[ $CURRENTDROP == "rcvd:" ]]; then
CURRENTDROP=0
PASTDROP=0
DROPPED=0
if [ ! "$THEGREP" ]; then
ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2)
declare RESULT=($ZEEKLOG)
CURRENTDROP=${RESULT[3]}
# zeek likely not running if this is true
if [[ $CURRENTDROP == "rcvd:" ]]; then
CURRENTDROP=0
PASTDROP=0
DROPPED=0
else
PASTDROP=${RESULT[9]}
DROPPED=$((CURRENTDROP - PASTDROP))
fi
if [[ "$DROPPED" -le 0 ]]; then
LOSS=0
echo "zeekdrop drop=0"
else
CURRENTPACKETS=${RESULT[5]}
PASTPACKETS=${RESULT[11]}
TOTAL=$((CURRENTPACKETS - PASTPACKETS))
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "zeekdrop drop=$LOSS"
fi
else
PASTDROP=${RESULT[9]}
DROPPED=$((CURRENTDROP - PASTDROP))
fi
if [[ "$DROPPED" -le 0 ]]; then
LOSS=0
echo "zeekdrop drop=0"
else
CURRENTPACKETS=${RESULT[5]}
PASTPACKETS=${RESULT[11]}
TOTAL=$((CURRENTPACKETS - PASTPACKETS))
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "zeekdrop drop=$LOSS"
exit 0
fi

View File

@@ -14,27 +14,21 @@
{% set CURATOR = salt['pillar.get']('curator:enabled', True) %}
{% set REDIS = salt['pillar.get']('redis:enabled', True) %}
{% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %}
{% set INSTALLEDSALTVERSION = grains.saltversion %}
base:
'not G@saltversion:{{saltversion}}':
- match: compound
- salt.minion-state-apply-test
{% if ISAIRGAP is sameas true %}
- airgap
{% endif %}
- repo.client
- salt.minion
'G@os:CentOS and G@saltversion:{{saltversion}}':
- match: compound
{% if ISAIRGAP is sameas true %}
- airgap
{% else %}
- yum
{% endif %}
- repo.client
- yum.packages
'* and G@saltversion:{{saltversion}}':

View File

@@ -1,17 +0,0 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
yumconf:
file.managed:
- name: /etc/yum.conf
- source: salt://yum/etc/yum.conf.jinja
- mode: 644
- template: jinja
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -42,7 +42,6 @@ INTERWEBS=AIRGAP
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=

View File

@@ -35,6 +35,7 @@ ADMINPASS2=onionuser
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
INTERWEBS=AIRGAP
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -35,6 +35,7 @@ ZEEKVERSION=ZEEK
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
INTERWEBS=AIRGAP
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -41,7 +41,6 @@ install_type=MANAGER
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=

Some files were not shown because too many files have changed in this diff Show More