Merge in upstream dev

Josh Brower
2022-05-06 20:01:07 -04:00
176 changed files with 4401 additions and 812 deletions

.github/workflows/pythontest.yml

@@ -0,0 +1,31 @@
name: python-test
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
python-code-path: ["salt/sensoroni/files/analyzers"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install flake8 pytest pytest-cov
find . -name requirements.txt -exec pip install -r {} \;
- name: Lint with flake8
run: |
flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
- name: Test with pytest
run: |
pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=90 --cov-config=${{ matrix.python-code-path }}/pytest.ini

.gitignore

@@ -56,4 +56,15 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk
# End of https://www.gitignore.io/api/macos,windows
# Pytest output
__pycache__
.pytest_cache
.coverage
*.pyc
.venv
# Analyzer dev/test config files
*_dev.yaml
site-packages

HOTFIX

@@ -0,0 +1 @@


@@ -1,6 +1,6 @@
## Security Onion 2.3.110
## Security Onion 2.3.120
Security Onion 2.3.110 is here!
Security Onion 2.3.120 is here!
## Screenshots


@@ -1,18 +1,18 @@
### 2.3.110-20220309 ISO image built on 2022/03/09
### 2.3.120-20220425 ISO image built on 2022/04/25
### Download and Verify
2.3.110-20220309 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220309.iso
2.3.120-20220425 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
MD5: 537564F8B56633E2D46E5E7C4E2BF18A
SHA1: 1E1B42EDB711AC8B5963B3460056770B91AE6BFC
SHA256: 4D73E5BE578DA43DCFD3C1B5F9AF07A7980D8DF90ACDDFEF6CEA177F872EECA0
MD5: C99729E452B064C471BEF04532F28556
SHA1: 60BF07D5347C24568C7B793BFA9792E98479CFBF
SHA256: CD17D0D7CABE21D45FA45E1CF91C5F24EB9608C79FF88480134E5592AFDD696E
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220309.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220309.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220309.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.110-20220309.iso.sig securityonion-2.3.110-20220309.iso
gpg --verify securityonion-2.3.120-20220425.iso.sig securityonion-2.3.120-20220425.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Wed 09 Mar 2022 10:20:47 AM EST using RSA key ID FE507013
gpg: Signature made Mon 25 Apr 2022 08:20:40 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.


@@ -1 +1 @@
2.3.120
2.3.130


@@ -13,4 +13,5 @@ logstash:
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
- so/9800_output_logscan.conf.jinja
- so/9801_output_rita.conf.jinja
- so/9900_output_endgame.conf.jinja


@@ -131,3 +131,6 @@ base:
{% endif %}
- global
- minions.{{ grains.id }}
'*_workstation':
- minions.{{ grains.id }}


@@ -217,6 +217,8 @@
'schedule',
'docker_clean'
],
'so-workstation': [
],
}, grain='role') %}
{% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-import', 'so-receiver'] %}


@@ -300,8 +300,17 @@ sostatus_log:
- month: '*'
- dayweek: '*'
{% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
# Install cron job to determine size of influxdb for telegraf
'du -s -k /nsm/influxdb | cut -f1 > /opt/so/log/telegraf/influxdb_size.log 2>&1':
cron.present:
- user: root
- minute: '*/1'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
# Lock permissions on the backup directory
backupdir:
file.directory:


@@ -15,295 +15,86 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
doc_workstation_url="https://docs.securityonion.net/en/2.3/analyst-vm.html"
{# we only want the script to install the workstation if it is CentOS -#}
{% if grains.os == 'CentOS' -%}
{# if this is a manager -#}
{% if grains.master == grains.id.split('_')|first -%}
INSTALL_LOG=/root/so-analyst-install.log
exec &> >(tee -a "$INSTALL_LOG")
source /usr/sbin/so-common
pillar_file="/opt/so/saltstack/local/pillar/minions/{{grains.id}}.sls"
log() {
msg=$1
level=${2:-I}
now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ")
echo -e "$now | $level | $msg" >> "$INSTALL_LOG" 2>&1
}
if [ -f "$pillar_file" ]; then
if ! grep -q "^workstation:$" "$pillar_file"; then
error() {
log "$1" "E"
}
info() {
log "$1" "I"
}
title() {
echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$INSTALL_LOG" 2>&1
}
logCmd() {
cmd=$1
info "Executing command: $cmd"
$cmd >> "$INSTALL_LOG" 2>&1
}
analyze_system() {
title "System Characteristics"
logCmd "uptime"
logCmd "uname -a"
logCmd "free -h"
logCmd "lscpu"
logCmd "df -h"
logCmd "ip a"
}
analyze_system
OS=$(grep PRETTY_NAME /etc/os-release | grep 'CentOS Linux 7')
if [ $? -ne 0 ]; then
echo "This is an unsupported OS. Please use CentOS 7 to install the analyst node."
exit 1
fi
if [[ "$manufacturer" == "Security Onion Solutions" && "$family" == "Automated" ]]; then
INSTALL=yes
CURLCONTINUE=no
else
INSTALL=''
CURLCONTINUE=''
fi
FIRSTPASS=yes
while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do
if [[ "$FIRSTPASS" == "yes" ]]; then
clear
echo "###########################################"
echo "## ** W A R N I N G ** ##"
echo "## _______________________________ ##"
echo "## ##"
echo "## Installing the Security Onion ##"
echo "## analyst node on this device will ##"
echo "## make permanent changes to ##"
echo "## the system. ##"
echo "## ##"
echo "###########################################"
echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)"
FIRSTPASS=no
else
echo "Please type 'yes' to continue or 'no' to exit."
fi
read INSTALL
done
if [[ $INSTALL == "no" ]]; then
echo "Exiting analyst node installation."
exit 0
fi
echo "Testing for internet connection with curl https://securityonionsolutions.com/"
CANCURL=$(curl -sI https://securityonionsolutions.com/ | grep "200 OK")
if [ $? -ne 0 ]; then
FIRSTPASS=yes
while [[ $CURLCONTINUE != "yes" ]] && [[ $CURLCONTINUE != "no" ]]; do
while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do
if [[ "$FIRSTPASS" == "yes" ]]; then
echo "We could not access https://securityonionsolutions.com/."
echo "Since packages are downloaded from the internet, internet access is required."
echo "If you would like to ignore this warning and continue anyway, please type 'yes'."
echo "Otherwise, type 'no' to exit."
echo "###########################################"
echo "## ** W A R N I N G ** ##"
echo "## _______________________________ ##"
echo "## ##"
echo "## Installing the Security Onion ##"
echo "## analyst node on this device will ##"
echo "## make permanent changes to ##"
echo "## the system. ##"
echo "## A system reboot will be required ##"
echo "## to complete the install. ##"
echo "## ##"
echo "###########################################"
echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)"
FIRSTPASS=no
else
echo "Please type 'yes' to continue or 'no' to exit."
fi
read CURLCONTINUE
fi
read INSTALL
done
if [[ "$CURLCONTINUE" == "no" ]]; then
if [[ $INSTALL == "no" ]]; then
echo "Exiting analyst node installation."
exit 0
fi
else
echo "We were able to curl https://securityonionsolutions.com/."
sleep 3
# Add workstation pillar to the minion's pillar file
printf '%s\n'\
"workstation:"\
" gui:"\
" enabled: true"\
"" >> "$pillar_file"
echo "Applying the workstation state. This could take some time since there are many packages that need to be installed."
if salt-call state.apply workstation -linfo queue=True; then # make sure the state ran successfully
echo ""
echo "Analyst workstation has been installed!"
echo "Press ENTER to reboot or Ctrl-C to cancel."
read pause
reboot;
else
echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/logs/salt/minion."
fi
else # workstation is already added
echo "The workstation pillar already exists in $pillar_file."
echo "To enable/disable the gui, set 'workstation:gui:enabled' to true or false in $pillar_file."
echo "Additional documentation can be found at $doc_workstation_url."
fi
# Install a GUI text editor
yum -y install gedit
# Install misc utils
yum -y install wget curl unzip epel-release yum-plugin-versionlock;
# Install xWindows
yum -y groupinstall "X Window System";
yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts;
unlink /etc/systemd/system/default.target;
ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target;
yum -y install file-roller
# Install Mono - prereq for NetworkMiner
yum -y install mono-core mono-basic mono-winforms expect
# Install NetworkMiner
yum -y install libcanberra-gtk2;
wget https://www.netresec.com/?download=NetworkMiner -O /tmp/nm.zip;
mkdir -p /opt/networkminer/
unzip /tmp/nm.zip -d /opt/networkminer/;
rm /tmp/nm.zip;
mv /opt/networkminer/NetworkMiner_*/* /opt/networkminer/
chmod +x /opt/networkminer/NetworkMiner.exe;
chmod -R go+w /opt/networkminer/AssembledFiles/;
chmod -R go+w /opt/networkminer/Captures/;
# Create networkminer shim
cat << EOF >> /bin/networkminer
#!/bin/bash
/bin/mono /opt/networkminer/NetworkMiner.exe --noupdatecheck "\$@"
EOF
chmod +x /bin/networkminer
# Convert networkminer ico file to png format
yum -y install ImageMagick
convert /opt/networkminer/networkminericon.ico /opt/networkminer/networkminericon.png
# Create menu entry
cat << EOF >> /usr/share/applications/networkminer.desktop
[Desktop Entry]
Name=NetworkMiner
Comment=NetworkMiner
Encoding=UTF-8
Exec=/bin/networkminer %f
Icon=/opt/networkminer/networkminericon-4.png
StartupNotify=true
Terminal=false
X-MultipleArgs=false
Type=Application
MimeType=application/x-pcap;
Categories=Network;
EOF
# Set default monospace font to Liberation
cat << EOF >> /etc/fonts/local.conf
<match target="pattern">
<test name="family" qual="any">
<string>monospace</string>
</test>
<edit binding="strong" mode="prepend" name="family">
<string>Liberation Mono</string>
</edit>
</match>
EOF
# Install Wireshark for Gnome
yum -y install wireshark-gnome;
# Install dnsiff
yum -y install dsniff;
# Install hping3
yum -y install hping3;
# Install netsed
yum -y install netsed;
# Install ngrep
yum -y install ngrep;
# Install scapy
yum -y install python36-scapy;
# Install ssldump
yum -y install ssldump;
# Install tcpdump
yum -y install tcpdump;
# Install tcpflow
yum -y install tcpflow;
# Install tcpxtract
yum -y install tcpxtract;
# Install whois
yum -y install whois;
# Install foremost
yum -y install https://forensics.cert.org/centos/cert/7/x86_64//foremost-1.5.7-13.1.el7.x86_64.rpm;
# Install chromium
yum -y install chromium;
# Install tcpstat
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcpstat-1.5.0/securityonion-tcpstat-1.5.0.rpm;
# Install tcptrace
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcptrace-6.6.7/securityonion-tcptrace-6.6.7.rpm;
# Install sslsplit
yum -y install libevent;
yum -y install sslsplit;
# Install Bit-Twist
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-bittwist-2.0.0/securityonion-bittwist-2.0.0.rpm;
# Install chaosreader
yum -y install perl-IO-Compress perl-Net-DNS;
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-chaosreader-0.95.10/securityonion-chaosreader-0.95.10.rpm;
chmod +x /bin/chaosreader;
if [ -f ../../files/analyst/README ]; then
cp ../../files/analyst/README /;
cp ../../files/analyst/so-wallpaper.jpg /usr/share/backgrounds/;
cp ../../files/analyst/so-lockscreen.jpg /usr/share/backgrounds/;
cp ../../files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/;
else
cp /opt/so/saltstack/default/salt/common/files/analyst/README /;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-wallpaper.jpg /usr/share/backgrounds/;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-lockscreen.jpg /usr/share/backgrounds/;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/;
else # if the pillar file doesn't exist
echo "Could not find $pillar_file and add the workstation pillar."
fi
# Set background wallpaper
cat << EOF >> /etc/dconf/db/local.d/00-background
# Specify the dconf path
[org/gnome/desktop/background]
{#- if this is not a manager #}
{% else -%}
# Specify the path to the desktop background image file
picture-uri='file:///usr/share/backgrounds/so-wallpaper.jpg'
# Specify one of the rendering options for the background image:
# 'none', 'wallpaper', 'centered', 'scaled', 'stretched', 'zoom', 'spanned'
picture-options='zoom'
# Specify the left or top color when drawing gradients or the solid color
primary-color='000000'
# Specify the right or bottom color when drawing gradients
secondary-color='FFFFFF'
EOF
echo "Since this is not a manager, the pillar values to enable analyst workstation must be set manually. Please view the documentation at $doc_workstation_url."
# Set lock screen
cat << EOF >> /etc/dconf/db/local.d/00-screensaver
[org/gnome/desktop/session]
idle-delay=uint32 180
{#- endif if this is a manager #}
{% endif -%}
[org/gnome/desktop/screensaver]
lock-enabled=true
lock-delay=uint32 120
picture-options='zoom'
picture-uri='file:///usr/share/backgrounds/so-lockscreen.jpg'
EOF
{#- if not CentOS #}
{%- else %}
cat << EOF >> /etc/dconf/db/local.d/locks/screensaver
/org/gnome/desktop/session/idle-delay
/org/gnome/desktop/screensaver/lock-enabled
/org/gnome/desktop/screensaver/lock-delay
EOF
echo "The Analyst Workstation can only be installed on CentOS. Please view the documentation at $doc_workstation_url."
# Do not show the user list at login screen
cat << EOF >> /etc/dconf/db/local.d/00-login-screen
[org/gnome/login-screen]
logo='/usr/share/pixmaps/so-login-logo-dark.svg'
disable-user-list=true
EOF
{#- endif grains.os == CentOS #}
{% endif -%}
dconf update;
echo
echo "Analyst workstation has been installed!"
echo "Press ENTER to reboot or Ctrl-C to cancel."
read pause
reboot;
exit 0


@@ -120,6 +120,30 @@ check_elastic_license() {
fi
}
check_salt_master_status() {
local timeout=$1
echo "Checking if we can talk to the salt master"
salt-call state.show_top concurrent=true
return
}
check_salt_minion_status() {
local timeout=$1
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
local status=$?
if [ $status -gt 0 ]; then
echo " Minion did not respond" >> "$setup_log" 2>&1
else
echo " Received job response from salt minion" >> "$setup_log" 2>&1
fi
return $status
}
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
@@ -367,6 +391,7 @@ run_check_net_err() {
exit $exit_code
fi
}
set_cron_service_name() {
if [[ "$OS" == "centos" ]]; then
cron_service_name="crond"


@@ -18,7 +18,7 @@
. /usr/sbin/so-common
# Check to see if we are already running
IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l)
[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." && exit 0
docker exec so-soctopus python3 playbook_play-sync.py


@@ -32,11 +32,17 @@ copy_new_files() {
# Copy new files over to the salt dir
cd /tmp/sogh/securityonion
git checkout $BRANCH
VERSION=$(cat VERSION)
# We need to overwrite if there is a repo file
if [ -d /opt/so/repo ]; then
tar -czf /opt/so/repo/"$VERSION".tar.gz -C "$(pwd)/.." .
fi
rsync -a salt $default_salt_dir/
rsync -a pillar $default_salt_dir/
chown -R socore:socore $default_salt_dir/salt
chown -R socore:socore $default_salt_dir/pillar
chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
rm -rf /tmp/sogh
}


@@ -115,8 +115,8 @@ clean() {
}
# Check to see if we are already running
IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l)
[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-sensor-clean")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; do


@@ -34,7 +34,15 @@ check_err() {
local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."
[[ $ERR_HANDLED == true ]] && exit $exit_code
if [[ $exit_code -ne 0 ]]; then
set +e
systemctl_func "start" "$cron_service_name"
systemctl_func "start" "salt-master"
systemctl_func "start" "salt-minion"
enable_highstate
printf '%s' "Soup failed with error $exit_code: "
case $exit_code in
2)
@@ -91,10 +99,7 @@ check_err() {
if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
echo "$err_msg"
fi
set +e
systemctl_func "start" "$cron_service_name"
echo "Ensuring highstate is enabled."
salt-call state.enable highstate --local
exit $exit_code
fi
@@ -366,6 +371,12 @@ clone_to_tmp() {
fi
}
enable_highstate() {
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
echo ""
}
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
@@ -411,6 +422,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
[[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
[[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110
[[ "$INSTALLEDVERISON" == 2.3.110 ]] && up_to_2.3.120
true
}
@@ -424,6 +436,8 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
[[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
[[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
[[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
true
}
@@ -487,15 +501,23 @@ post_to_2.3.110() {
POSTVERSION=2.3.110
}
post_to_2.3.120() {
echo "Post Processing for 2.3.120"
POSTVERSION=2.3.120
sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf
}
stop_salt_master() {
# Kill all Salt jobs across the grid because they hang indefinitely if they are queued when salt-master restarts
set +e
echo ""
echo "Killing all Salt jobs across the grid."
salt \* saltutil.kill_all_jobs
salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt"
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo ""
@@ -723,9 +745,6 @@ up_to_2.3.90() {
up_to_2.3.100() {
fix_wazuh
echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again."
rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched
echo "Adding receiver hostgroup with so-firewall"
if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then
echo 'receiver hostgroup already exists'
@@ -738,11 +757,16 @@ up_to_2.3.100() {
}
up_to_2.3.110() {
echo "Updating to Security Onion 2.3.110"
echo "Updating shard settings for Elasticsearch index templates"
sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
}
up_to_2.3.120() {
# Stop thehive services since these will be broken in .120
so-thehive-stop
so-thehive-es-stop
so-cortex-stop
}
verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
@@ -857,7 +881,7 @@ upgrade_salt() {
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
@@ -866,11 +890,29 @@ upgrade_salt() {
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 0
else
echo "Salt upgrade success."
echo ""
echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again."
rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched
fi
}
update_repo() {
echo "Performing repo changes."
if [[ "$OS" == "centos" ]]; then
echo "Performing repo changes."
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
@@ -890,6 +932,21 @@ update_repo() {
yum clean all
yum repolist
fi
elif [[ "$OS" == "ubuntu" ]]; then
ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
OSVER=bionic
elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
else
echo "We do not support your current version of Ubuntu."
exit 1
fi
rm -f /etc/apt/sources.list.d/salt.list
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt $OSVER main" > /etc/apt/sources.list.d/saltstack.list
apt-get update
fi
}
@@ -922,6 +979,8 @@ verify_latest_update_script() {
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
fix_wazuh
elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
2_3_10_hotfix_1
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
@@ -943,6 +1002,28 @@ fix_wazuh() {
fi
}
# Upgrade Salt to 3004.1
2_3_10_hotfix_1() {
systemctl_func "stop" "$cron_service_name"
# update mine items prior to stopping salt-minion and salt-master
update_salt_mine
stop_salt_minion
stop_salt_master
update_repo
# Does Salt need to be upgraded? If so, update it.
if [[ $UPGRADESALT -eq 1 ]]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
fi
rm -f /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdbmod.py.patched /opt/so/state/influxdb_retention_policy.py.patched
systemctl_func "start" "salt-master"
salt-call state.apply salt.python3-influxdb -l info
systemctl_func "start" "salt-minion"
systemctl_func "start" "$cron_service_name"
}
main() {
trap 'check_err $?' EXIT
@@ -954,6 +1035,17 @@ main() {
echo "### Preparing soup at $(date) ###"
echo ""
set_os
set_cron_service_name
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master."
echo "Checking to see if this is a manager."
echo ""
require_manager
@@ -989,8 +1081,6 @@ main() {
echo "Verifying we have the latest soup script."
verify_latest_update_script
echo ""
set_os
set_cron_service_name
set_palette
check_elastic_license
echo ""
@@ -1012,12 +1102,19 @@ main() {
upgrade_check_salt
set -e
if [[ $is_airgap -eq 0 ]]; then
update_centos_repo
yum clean all
check_os_updates
fi
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION hotfix"
copy_new_files
apply_hotfix
echo "Hotfix applied"
update_version
enable_highstate
salt-call state.highstate -l info queue=True
else
echo ""
@@ -1032,9 +1129,6 @@ main() {
echo "Updating dockers to $NEWVERSION."
if [[ $is_airgap -eq 0 ]]; then
airgap_update_dockers
update_centos_repo
yum clean all
check_os_updates
# if not airgap but -f was used
elif [[ ! -z "$ISOLOC" ]]; then
airgap_update_dockers
@@ -1057,21 +1151,6 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 0
else
echo "Salt upgrade success."
echo ""
fi
fi
preupgrade_changes
@@ -1127,9 +1206,7 @@ main() {
echo ""
fi
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
echo ""
enable_highstate
echo ""
echo "Running a highstate. This could take several minutes."


@@ -4064,7 +4064,7 @@ elasticsearch:
field: "@timestamp"
order: desc
refresh_interval: 30s
number_of_shards: 1
number_of_shards: 2
number_of_replicas: 0
composed_of:
- agent-mappings


@@ -0,0 +1,127 @@
{
"description": "RITA Beacons",
"processors": [
{
"set": {
"field": "_index",
"value": "so-rita",
"override": true
}
},
{
"csv": {
"field": "message",
"target_fields": [
"beacon.score",
"source.ip",
"destination.ip",
"network.connections",
"network.average_bytes",
"beacon.interval.range",
"beacon.size.range",
"beacon.interval.top",
"beacon.size.top",
"beacon.interval.top_count",
"beacon.size.top_count",
"beacon.interval.skew",
"beacon.size.skew",
"beacon.interval.dispersion",
"beacon.size.dispersion",
"network.bytes"
]
}
},
{
"convert": {
"field": "beacon.score",
"type": "float"
}
},
{
"convert": {
"field": "network.connections",
"type": "integer"
}
},
{
"convert": {
"field": "network.average_bytes",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.interval.range",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.size.range",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.interval.top",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.size.top",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.interval.top_count",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.size.top_count",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.interval.skew",
"type": "float"
}
},
{
"convert": {
"field": "beacon.size.skew",
"type": "float"
}
},
{
"convert": {
"field": "beacon.interval.dispersion",
"type": "integer"
}
},
{
"convert": {
"field": "beacon.size.dispersion",
"type": "integer"
}
},
{
"convert": {
"field": "network.bytes",
"type": "integer"
}
},
{ "set": { "if": "ctx.beacon?.score == 1", "field": "dataset", "value": "alert", "override": true }},
{ "set": { "if": "ctx.beacon?.score == 1", "field": "rule.name", "value": "Potential C2 Beacon Activity", "override": true }},
{ "set": { "if": "ctx.beacon?.score == 1", "field": "event.severity", "value": 3, "override": true }},
{
"pipeline": {
"name": "common"
}
}
]
}


@@ -0,0 +1,36 @@
{
"description": "RITA Connections",
"processors": [
{
"set": {
"field": "_index",
"value": "so-rita",
"override": true
}
},
{
"dissect": {
"field": "message",
"pattern": "%{source.ip},%{destination.ip},%{network.port}:%{network.protocol}:%{network.service},%{connection.duration},%{connection.state}"
}
},
{
"convert": {
"field": "connection.duration",
"type": "float"
}
},
{
"set": {
"field": "event.duration",
"value": "{{ connection.duration }}",
"override": true
}
},
{
"pipeline": {
"name": "common"
}
}
]
}


@@ -0,0 +1,39 @@
{
"description": "RITA DNS",
"processors": [
{
"set": {
"field": "_index",
"value": "so-rita",
"override": true
}
},
{
"csv": {
"field": "message",
"target_fields": [
"dns.question.name",
"dns.question.subdomain_count",
"dns.question.count"
]
}
},
{
"convert": {
"field": "dns.question.subdomain_count",
"type": "integer"
}
},
{
"convert": {
"field": "dns.question.count",
"type": "integer"
}
},
{
"pipeline": {
"name": "common"
}
}
]
}


@@ -1,36 +1,157 @@
{
"description" : "syslog",
"description" : "syslog pipeline",
"processors" : [
{
"dissect": {
"field": "message",
"pattern" : "%{message}",
"on_failure": [ { "drop" : { } } ]
},
"remove": {
"field": [ "type", "agent" ],
"ignore_failure": true
}
"dissect": {
"field": "message",
"pattern" : "%{message}",
"on_failure": [ { "drop" : { } } ]
},
"remove": {
"field": [ "type", "agent" ],
"ignore_failure": true
}
}, {
"grok": {
"field": "message",
"patterns": [
"^<%{INT:syslog.priority:int}>%{TIMESTAMP_ISO8601:syslog.timestamp} +%{IPORHOST:syslog.host} +%{PROG:syslog.program}(?:\\[%{POSINT:syslog.pid:int}\\])?: %{GREEDYDATA:real_message}$",
"^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}(\\[%{DATA:pid}\\])?: %{GREEDYDATA:real_message}$",
"^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$"
],
"ignore_failure": true
}
},
{
"grok":
{
"field": "message",
"patterns": [
"^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}(\\[%{DATA:pid}\\])?: %{GREEDYDATA:real_message}$",
"^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$"
],
"ignore_failure": true
}
"convert" : {
"if": "ctx?.syslog?.priority != null",
"field" : "syslog.priority",
"type": "integer"
}
},
{ "set": { "if": "ctx.source?.application == 'filterlog'", "field": "dataset", "value": "firewall", "ignore_failure": true } },
{ "set": { "if": "ctx.vendor != null", "field": "module", "value": "{{ vendor }}", "ignore_failure": true } },
{ "set": { "if": "ctx.product != null", "field": "dataset", "value": "{{ product }}", "ignore_failure": true } },
{ "set": { "field": "event.ingested", "value": "{{ @timestamp }}" } },
{ "date": { "if": "ctx.syslog?.timestamp != null", "field": "syslog.timestamp", "target_field": "@timestamp", "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], "ignore_failure": true } },
{ "remove": { "field": ["pid", "program"], "ignore_missing": true, "ignore_failure": true } },
{ "pipeline": { "if": "ctx.vendor != null && ctx.product != null", "name": "{{ vendor }}.{{ product }}", "ignore_failure": true } },
{ "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog", "ignore_failure": true } },
{ "pipeline": { "name": "common" } }
{
"script": {
"description": "Map syslog priority into facility and level",
"lang": "painless",
"params" : {
"level": [
"emerg",
"alert",
"crit",
"err",
"warn",
"notice",
"info",
"debug"
],
"facility" : [
"kern",
"user",
"mail",
"daemon",
"auth",
"syslog",
"lpr",
"news",
"uucp",
"cron",
"authpriv",
"ftp",
"ntp",
"security",
"console",
"solaris-cron",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7"
]
},
"source": "if (ctx['syslog'] != null && ctx['syslog']['priority'] != null) { int p = ctx['syslog']['priority']; int f = p / 8; int l = p - (f * 8); ctx['syslog']['facility_label'] = [ : ]; ctx['syslog']['severity_label'] = [ : ]; ctx['syslog'].put('severity', l); ctx['syslog'].put('severity_label', params.level[l].toUpperCase()); ctx['syslog'].put('facility', f); ctx['syslog'].put('facility_label', params.facility[f].toUpperCase()); }"
}
},
{
"set": {
"if": "ctx.syslog?.host != null",
"field": "host.name",
"value": "{{ syslog.host }}",
"ignore_failure": true
}
}, {
"set": {
"if": "ctx.syslog?.program != null",
"field": "process.name",
"value": "{{ syslog.program }}",
"ignore_failure": true
}
}, {
"set": {
"if": "ctx.syslog?.pid != null",
"field": "process.id",
"value": "{{ syslog.pid }}",
"ignore_failure": true
}
}, {
"set": {
"if": "ctx.source?.application == 'filterlog'",
"field": "dataset",
"value": "firewall",
"ignore_failure": true
}
}, {
"set": {
"if": "ctx.vendor != null",
"field": "module",
"value": "{{ vendor }}",
"ignore_failure": true
}
}, {
"set": {
"if": "ctx.product != null",
"field": "dataset",
"value": "{{ product }}",
"ignore_failure": true
}
}, {
"set": {
"field": "ingest.timestamp",
"value": "{{ @timestamp }}"
}
}, {
"date": {
"if": "ctx.syslog?.timestamp != null",
"field": "syslog.timestamp",
"target_field": "@timestamp",
"formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"],
"ignore_failure": true
}
}, {
"remove": {
"field": ["pid", "program"],
"ignore_missing": true,
"ignore_failure": true
}
}, {
"pipeline": {
"if": "ctx.vendor != null && ctx.product != null",
"name": "{{ vendor }}.{{ product }}",
"ignore_failure": true
}
}, {
"pipeline": {
"if": "ctx.dataset == 'firewall'",
"name": "filterlog",
"ignore_failure": true
}
}, {
"pipeline": { "name": "common" }
}
]
}
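
For reference, the facility/severity decoding performed by the painless script above is plain integer arithmetic: facility = priority / 8 (integer division) and severity = priority % 8. Below is a minimal Python sketch of the same mapping, reusing the label tables from the pipeline; the sample priority value is made up for illustration.

```
# Decode a syslog priority the same way the pipeline's painless script does.
LEVELS = ["emerg", "alert", "crit", "err", "warn", "notice", "info", "debug"]
FACILITIES = [
    "kern", "user", "mail", "daemon", "auth", "syslog", "lpr", "news",
    "uucp", "cron", "authpriv", "ftp", "ntp", "security", "console",
    "solaris-cron", "local0", "local1", "local2", "local3", "local4",
    "local5", "local6", "local7",
]


def decode_priority(priority):
    facility = priority // 8   # f = p / 8 in the painless script
    severity = priority % 8    # l = p - (f * 8)
    return {
        "facility": facility,
        "facility_label": FACILITIES[facility].upper(),
        "severity": severity,
        "severity_label": LEVELS[severity].upper(),
    }


# Example (made-up value): priority 86 decodes to authpriv.info.
print(decode_priority(86))  # facility 10 AUTHPRIV, severity 6 INFO
```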


@@ -11,10 +11,17 @@ appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling


@@ -60,6 +60,32 @@
},
"type": "wildcard"
},
"entity_id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"executable": {
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
},
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"fields": {
"keyword": {
@@ -73,6 +99,133 @@
"ignore_above": 1024,
"type": "keyword"
},
"parent": {
"properties": {
"command_line": {
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"text": {
"type": "match_only_text"
},
"keyword": {
"type": "keyword"
}
},
"type": "wildcard"
},
"entity_id": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"executable": {
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
},
"pe": {
"properties": {
"architecture": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"company": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"description": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"file_version": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"original_file_name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
},
"product": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
}
}
}
},
"pid": {
"type": "long",
"fields": {
@@ -88,6 +241,19 @@
"type": "keyword"
}
}
},
"working_directory": {
"fields": {
"security": {
"type": "text",
"analyzer": "es_security_analyzer"
},
"keyword": {
"type": "keyword"
}
},
"ignore_above": 1024,
"type": "keyword"
}
}
}


@@ -33,6 +33,8 @@ while [[ "$COUNT" -le 240 ]]; do
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
# Check cluster health once connected
so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1
break
else
((COUNT+=1))


@@ -10,6 +10,7 @@
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{%- set FBMEMEVENTS = salt['pillar.get']('filebeat:mem_events', 2048) -%}
@@ -267,6 +268,54 @@ filebeat.inputs:
{%- endif %}
{%- if RITAENABLED %}
- type: filestream
paths:
- /nsm/rita/beacons.csv
exclude_lines: ['^Score', '^Source', '^Domain', '^No results']
fields:
module: rita
dataset: beacon
category: network
processors:
- drop_fields:
fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true
pipeline: "rita.beacon"
index: "so-rita"
- type: filestream
paths:
- /nsm/rita/long-connections.csv
- /nsm/rita/open-connections.csv
exclude_lines: ['^Source', '^No results']
fields:
module: rita
dataset: connection
category: network
processors:
- drop_fields:
fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true
pipeline: "rita.connection"
index: "so-rita"
- type: filestream
paths:
- /nsm/rita/exploded-dns.csv
exclude_lines: ['^Domain', '^No results']
fields:
module: rita
dataset: dns
category: network
processors:
- drop_fields:
fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true
pipeline: "rita.dns"
index: "so-rita"
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: filestream
paths:

View File

@@ -349,6 +349,9 @@ role:
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
strelka_frontend:
portgroups:
- {{ portgroups.strelka_frontend }}
syslog:
portgroups:
- {{ portgroups.syslog }}
@@ -482,6 +485,9 @@ role:
self:
portgroups:
- {{ portgroups.syslog}}
strelka_frontend:
portgroups:
- {{ portgroups.strelka_frontend }}
INPUT:
hostgroups:
anywhere:
@@ -511,6 +517,9 @@ role:
self:
portgroups:
- {{ portgroups.syslog}}
strelka_frontend:
portgroups:
- {{ portgroups.strelka_frontend }}
INPUT:
hostgroups:
anywhere:


@@ -19,11 +19,37 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %}
include:
- idh.openssh.config
- firewall
# IDH State
# If True, block IDH services from accepting connections on the management IP
{% if RESTRICTIDHSERVICES %}
{% from 'idh/opencanary_config.map.jinja' import OPENCANARYCONFIG %}
{% set idh_services = salt['pillar.get']('idh:services', []) %}
{% for service in idh_services %}
{% if service in ["snmp", "ntp", "tftp"] %}
{% set proto = 'udp' %}
{% else %}
{% set proto = 'tcp' %}
{% endif %}
block_mgt_ip_idh_services_{{ proto }}_{{ OPENCANARYCONFIG[service~'.port'] }} :
iptables.insert:
- table: filter
- chain: INPUT
- jump: DROP
- position: 1
- proto: {{ proto }}
- dport: {{ OPENCANARYCONFIG[service~'.port'] }}
- destination: {{ MAINIP }}
{% endfor %}
{% endif %}
# Create a config directory
temp:

View File

@@ -1,4 +1,4 @@
{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log') %}
{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log', shell='/bin/bash') %}
influxdb:
retention_policies:


@@ -59,7 +59,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/8.1.0" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/8.2.0" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done


@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.1.0","id": "8.1.0","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.2.0","id": "8.2.0","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}


@@ -18,7 +18,7 @@ appender.rolling.name = rolling
appender.rolling.fileName = /var/log/logstash/logstash.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = /var/log/logstash/logstash-%d{yyyy-MM-dd}.log
appender.rolling.filePattern = /var/log/logstash/logstash-%d{yyyy-MM-dd}.log.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
@@ -27,7 +27,7 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/logstash
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = logstash-*.log
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D
rootLogger.level = info


@@ -0,0 +1,22 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
output {
if [module] =~ "rita" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
{% endif %}
index => "so-rita"
ssl => true
ssl_certificate_verification => false
}
}
}


@@ -0,0 +1,98 @@
{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
{% from 'repo/client/map.jinja' import REPOPATH with context %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% set managerupdates = salt['pillar.get']('global:managerupdate', 0) %}
{% set role = grains.id.split('_') | last %}
# from airgap state
{% if ISAIRGAP and grains.os == 'CentOS' %}
{% set MANAGER = salt['grains.get']('master') %}
airgapyum:
file.managed:
- name: /etc/yum/yum.conf
- source: salt://repo/client/files/centos/airgap/yum.conf
airgap_repo:
pkgrepo.managed:
- humanname: Airgap Repo
- baseurl: https://{{ MANAGER }}/repo
- gpgcheck: 0
- sslverify: 0
{% endif %}
# from airgap and common
{% if ABSENTFILES|length > 0%}
{% for file in ABSENTFILES %}
{{ file }}:
file.absent:
- name: {{ REPOPATH }}{{ file }}
- onchanges_in:
- cmd: cleanyum
{% endfor %}
{% endif %}
# from common state
# Remove default Repos
{% if grains['os'] == 'CentOS' %}
repair_yumdb:
cmd.run:
- name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
- onlyif:
- 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
crsynckeys:
file.recurse:
- name: /etc/pki/rpm_gpg
- source: salt://repo/client/files/centos/keys/
{% if not ISAIRGAP %}
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
remove_securityonionrepocache:
file.absent:
- name: /etc/yum.repos.d/securityonioncache.repo
{% endif %}
{% if role not in ['eval', 'standalone', 'import', 'manager', 'managersearch'] and managerupdates == 1 %}
remove_securityonionrepo:
file.absent:
- name: /etc/yum.repos.d/securityonion.repo
{% endif %}
crsecurityonionrepo:
file.managed:
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
- name: /etc/yum.repos.d/securityonion.repo
- source: salt://repo/client/files/centos/securityonion.repo
{% else %}
- name: /etc/yum.repos.d/securityonioncache.repo
- source: salt://repo/client/files/centos/securityonioncache.repo
{% endif %}
- mode: 644
yumconf:
file.managed:
- name: /etc/yum.conf
- source: salt://repo/client/files/centos/yum.conf.jinja
- mode: 644
- template: jinja
- show_changes: False
cleanairgap:
file.absent:
- name: /etc/yum.repos.d/airgap_repo.repo
{% endif %}
cleanyum:
cmd.run:
- name: 'yum clean metadata'
- onchanges:
{% if ISAIRGAP %}
- file: airgapyum
- pkgrepo: airgap_repo
{% else %}
- file: crsecurityonionrepo
- file: yumconf
{% endif %}
{% endif %}


@@ -1,98 +1,2 @@
{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
{% from 'repo/client/map.jinja' import REPOPATH with context %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% set managerupdates = salt['pillar.get']('global:managerupdate', 0) %}
{% set role = grains.id.split('_') | last %}
# from airgap state
{% if ISAIRGAP and grains.os == 'CentOS' %}
{% set MANAGER = salt['grains.get']('master') %}
airgapyum:
file.managed:
- name: /etc/yum/yum.conf
- source: salt://repo/client/files/centos/airgap/yum.conf
airgap_repo:
pkgrepo.managed:
- humanname: Airgap Repo
- baseurl: https://{{ MANAGER }}/repo
- gpgcheck: 0
- sslverify: 0
{% endif %}
# from airgap and common
{% if ABSENTFILES|length > 0%}
{% for file in ABSENTFILES %}
{{ file }}:
file.absent:
- name: {{ REPOPATH }}{{ file }}
- onchanges_in:
- cmd: cleanyum
{% endfor %}
{% endif %}
# from common state
# Remove default Repos
{% if grains['os'] == 'CentOS' %}
repair_yumdb:
cmd.run:
- name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
- onlyif:
- 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
crsynckeys:
file.recurse:
- name: /etc/pki/rpm_gpg
- source: salt://repo/client/files/centos/keys/
{% if not ISAIRGAP %}
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
remove_securityonionrepocache:
file.absent:
- name: /etc/yum.repos.d/securityonioncache.repo
{% endif %}
{% if role not in ['eval', 'standalone', 'import', 'manager', 'managersearch'] and managerupdates == 1 %}
remove_securityonionrepo:
file.absent:
- name: /etc/yum.repos.d/securityonion.repo
{% endif %}
crsecurityonionrepo:
file.managed:
{% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
- name: /etc/yum.repos.d/securityonion.repo
- source: salt://repo/client/files/centos/securityonion.repo
{% else %}
- name: /etc/yum.repos.d/securityonioncache.repo
- source: salt://repo/client/files/centos/securityonioncache.repo
{% endif %}
- mode: 644
yumconf:
file.managed:
- name: /etc/yum.conf
- source: salt://repo/client/files/centos/yum.conf.jinja
- mode: 644
- template: jinja
- show_changes: False
cleanairgap:
file.absent:
- name: /etc/yum.repos.d/airgap_repo.repo
{% endif %}
cleanyum:
cmd.run:
- name: 'yum clean metadata'
- onchanges:
{% if ISAIRGAP %}
- file: airgapyum
- pkgrepo: airgap_repo
{% else %}
- file: crsecurityonionrepo
- file: yumconf
{% endif %}
{% endif %}
include:
- repo.client.{{grains.os | lower}}


@@ -0,0 +1,20 @@
# This removes the repo file left behind when bootstrap-salt.sh is run without -r
remove_salt.list:
file.absent:
- name: /etc/apt/sources.list.d/salt.list
saltstack.list:
file.managed:
- name: /etc/apt/sources.list.d/saltstack.list
- contents:
- deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt/ {{grains.oscodename}} main
apt_update:
cmd.run:
- name: apt-get update
- onchanges:
- file: saltstack.list
- timeout: 30
- retry:
attempts: 5
interval: 30


@@ -11,6 +11,7 @@
{% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %}
{% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %}
{% set PYTHONINSTALLER = 'pip' %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
@@ -21,6 +22,7 @@
{% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %}
{% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %}
{% set PYTHONINSTALLER = 'pkg' %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %}
{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}
@@ -29,7 +31,7 @@
{% if grains.os|lower in ['centos', 'redhat'] %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os|lower == 'ubuntu' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}


@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
version: 3004
version: 3004.1


@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
version: 3004
version: 3004.1
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.


@@ -3,6 +3,7 @@
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'salt/map.jinja' import SALTPACKAGES %}
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}
@@ -31,6 +32,22 @@ install_salt_minion:
exec 1>&- # close stdout
exec 2>&- # close stderr
nohup /bin/sh -c '{{ UPGRADECOMMAND }}' &
{# if we are the salt master #}
{% if grains.id.split('_')|first == grains.master %}
remove_influxdb_continuous_query_state_file:
file.absent:
- name: /opt/so/state/influxdb_continuous_query.py.patched
remove_influxdbmod_state_file:
file.absent:
- name: /opt/so/state/influxdbmod.py.patched
remove_influxdb_retention_policy_state_file:
file.absent:
- name: /opt/so/state/influxdb_retention_policy.py.patched
{% endif %}
{% endif %}
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
@@ -66,7 +83,7 @@ set_log_levels:
salt_minion_service_unit_file:
file.managed:
- name: /etc/systemd/system/multi-user.target.wants/salt-minion.service
- name: {{ SYSTEMD_UNIT_FILE }}
- source: salt://salt/service/salt-minion.service.jinja
- template: jinja
- defaults:
@@ -94,6 +111,7 @@ salt_minion_service:
- file: set_log_levels
- file: salt_minion_service_unit_file
{% endif %}
- order: last
patch_pkg:


@@ -0,0 +1,248 @@
# Security Onion Analyzers
Security Onion provides a means for performing data analysis on varying inputs: any data of interest sourced from event logs, such as hostnames, IP addresses, file hashes, and URLs. The analysis is conducted by one or more analyzers that understand that type of input. A set of analyzers ships with the default installation of Security Onion, and additional analyzers can be added to extend the analysis to other areas or data types.
## Supported Observable Types
The built-in analyzers support the following observable types:
| Name | Domain | Hash | IP | JA3 | Mail | Other | URI | URL | User Agent |
| ------------------------|--------|-------|-------|-------|-------|-------|-------|-------|------------|
| Alienvault OTX |&check; |&check;|&check;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| EmailRep |&cross; |&cross;|&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|
| Greynoise |&cross; |&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|
| JA3er |&cross; |&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|
| LocalFile |&check; |&check;|&check;|&check;|&cross;|&check;|&cross;|&check;|&cross;|
| Pulsedive |&check; |&check;|&check;|&cross;|&cross;|&cross;|&check;|&check;|&check;|
| Spamhaus |&cross; |&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|
| Urlhaus |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Urlscan |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Virustotal |&check; |&check;|&check;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
## Developer Guide
### Python
Analyzers are Python modules: a simpler analyzer can consist of a single .py script, while a more complex one can be a set of scripts organized within nested directories.
The Python language was chosen because of its wide adoption in the security industry, its ease of development and testing, and the abundance of developers with Python skills.
Specifically, analyzers must be compatible with Python 3.10.
For more information about Python, see the [Python Documentation](https://docs.python.org).
### Development
Custom analyzers should be developed outside of the Security Onion cluster, in a proper software development environment, with version control or other backup mechanisms in place. The analyzer can be developed, unit tested, and integration tested without the need for a Security Onion installation. Once satisfied with the analyzer's functionality, copy the analyzer directory to the Security Onion manager node.
Developing an analyzer directly on a Security Onion manager node is strongly discouraged, as loss of source code (and time and effort) can occur should the manager node suffer a catastrophic failure with disk storage loss.
For best results, avoid long, complicated functions in favor of short, discrete functions. This has several benefits:
- Easier to troubleshoot
- Easier to maintain
- Easier to unit test
- Easier for other developers to review
### Linting
Source code should adhere to the [PEP 8 - Style Guide for Python Code](https://peps.python.org/pep-0008/). Developers can use the default configuration of `flake8` to validate conformance, or run the included `build.sh` inside the analyzers directory. Note that linting conformance is mandatory for analyzers that are contributed back to the Security Onion project.
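For example, to check a single analyzer directory by hand (assuming `flake8` is already installed; `myanalyzer` is a placeholder directory name used for illustration):
```bash
# Lint one analyzer directory using the shared configuration file
flake8 myanalyzer/ --config=pytest.ini

# Or run the bundled CI helper, which lints and unit tests the target directory
./build.sh myanalyzer
```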
### Testing
Python's [unittest](https://docs.python.org/3/library/unittest.html) library can be used to cover analyzer code with unit tests. Unit tests are encouraged for custom analyzers, and mandatory for public analyzers submitted back to the Security Onion project.
If you are new to unit testing, please see the included `urlhaus_test.py` as an example.
Unit tests should be named following the pattern `<scriptname>_test.py`.
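As a starting point, a minimal test module for a hypothetical analyzer named `myanalyzer` might resemble the sketch below; the `prepareResults` behavior asserted here mirrors the convention used by the bundled analyzers and should be adapted to the analyzer under test:
```python
# myanalyzer_test.py - minimal unit test sketch for a hypothetical analyzer
import unittest

import myanalyzer  # hypothetical module under test


class TestMyAnalyzerMethods(unittest.TestCase):
    def test_prepareResults_empty(self):
        # Simulate an empty response from the remote service
        raw = {}
        results = myanalyzer.prepareResults(raw)
        self.assertEqual(results["response"], raw)
        self.assertEqual(results["status"], "caution")
        self.assertEqual(results["summary"], "internal_failure")


if __name__ == "__main__":
    unittest.main()
```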
### Analyzer Package Structure
Deployment of a custom analyzer entails copying the analyzer source directory and dependency wheel archives to the Security Onion manager node. The destination locations can be found inside the `securityonion` salt source directory tree. Using the [Saltstack](https://github.com/saltstack/salt) directory pattern allows custom analyzers to be added with minimal additional effort needed to upgrade to newer versions of Security Onion. When the _sensoroni_ salt state executes, it merges the default analyzers with any local analyzers and copies the merged analyzers into the `/opt/so/conf/sensoroni` directory.
Do not modify files in the `/opt/so/conf/sensoroni` directory! This is a generated directory, and any changes made inside it will be automatically erased at frequent intervals.
On a Security Onion manager, custom analyzers should be placed inside the `/opt/so/saltstack/local/salt/sensoroni` directory, as described in the next section.
#### Directory Tree
From within the default saltstack directory, the following files and directories exist:
```
salt
|- sensoroni
|- files
|- analyzers
|- urlhaus <- Example of an existing analyzer
| |- source-packages <- Contains wheel package bundles for this analyzer's dependencies
| |- site-packages <- Auto-generated site-packages directory (or used for custom dependencies)
| |- requirements.txt <- List of all dependencies needed for this analyzer
| |- urlhaus.py <- Source code for the analyzer
| |- urlhaus_test.py <- Unit tests for the analyzer source code
| |- urlhaus.json <- Metadata for the analyzer
| |- __init__.py <- Package initialization file, often empty
|
|- build.sh <- Simple CI tool for validating linting and unit tests
|- helpers.py <- Common functions shared by many analyzers
|- helpers_test.py <- Unit tests for the shared source code
|- pytest.ini <- Configuration options for flake8 and pytest
|- README.md <- The file you are currently reading
```
Custom analyzers should conform to this same structure, but instead of being placed in the `/opt/so/saltstack/default` directory tree, they should be placed in the `/opt/so/saltstack/local` directory tree. This ensures future Security Onion upgrades will not overwrite customizations. Shared files like `build.sh` and `helpers.py` do not need to be duplicated; they can remain in the _default_ directory tree. Only new or modified files should exist in the _local_ directory tree.
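For example, a custom analyzer with the placeholder name `myanalyzer` would be laid out under `/opt/so/saltstack/local` as follows:
```
salt
|- sensoroni
   |- files
      |- analyzers
         |- myanalyzer
            |- source-packages
            |- requirements.txt
            |- myanalyzer.py
            |- myanalyzer_test.py
            |- myanalyzer.json
            |- __init__.py
```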
#### Metadata
Each analyzer has certain metadata that describes the function of the analyzer, required inputs, artifact compatibility, optional configuration options, analyzer version, and other important details. This is a static file and is not intended to be used for dynamic or custom configuration options; it should only be modified by the author of the analyzer.
The following example describes the urlhaus metadata content:
```
{
"name": "Urlhaus", <- Unique human-friendly name of this analyzer
"version": "0.1", <- The version of the analyzer
"author": "Security Onion Solutions", <- Author's name, and/or email or other contact information
"description": "This analyzer queries URLHaus...", <- A brief, concise description of the analyzer
"supportedTypes" : ["url"], <- List of types that must match the SOC observable types
"baseUrl": "https://urlhaus-api.abuse.ch/v1/url/" <- Optional hardcoded data used by the analyzer
}
```
The `supportedTypes` values should only contain the types that this analyzer can work with. In the case of the URLHaus analyzer, we know that it works with URLs, so adding "hash" to this list wouldn't make sense, since URLHaus doesn't provide information about file hashes. If an analyzer does not support a particular type, then it will not show up in the analyzer results in SOC for that observable being analyzed. This is intentional, to eliminate unnecessary screen clutter in SOC. To find a list of available values for the `supportedTypes` field, log in to SOC and, inside of a Case, click the + button on the Observables tab. You will see a list of types; each of those can be used in this metadata field, when applicable to the analyzer.
#### Dependencies
Analyzers will often require the use of third-party packages. For example, if an analyzer needs to make a request to a remote server via HTTPS, then the `requests` package will likely be used. Each analyzer contains a `requirements.txt` file, in which all third-party dependencies can be specified, following the Python [Requirements File Specification](https://pip.pypa.io/en/stable/reference/requirements-file-format/).
Additionally, to support airgapped users, the dependency packages themselves, and any transitive dependencies, should be placed inside the `source-packages` directory. To obtain the full hierarchy of dependencies, execute the following command:
```bash
pip download -r <my-analyzer-path>/requirements.txt -d <my-analyzer-path>/source-packages
```
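The wheels collected in `source-packages` can also serve as a local package source when developing on a machine without internet access; this relies only on standard `pip` options, nothing Security Onion specific:
```bash
pip install --no-index --find-links <my-analyzer-path>/source-packages -r <my-analyzer-path>/requirements.txt
```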
### Analyzer Architecture
The Sensoroni Docker container is responsible for executing analyzers. Only the manager's Sensoroni container will process analyzer jobs. Other nodes in the grid, such as sensors and search nodes, will not be assigned analyzer jobs.
When the Sensoroni Docker container starts, the `/opt/so/conf/sensoroni/analyzer` directory is mapped into the container. During initialization, the Sensoroni Analyze module scans that directory for subdirectories; each valid subdirectory is added as an available analyzer.
The analyzer itself will only run when a user in SOC enqueues an analyzer job, such as via the Cases -> Observables tab. When the Sensoroni node is ready to run the job, it executes the Python interpreter separately for each loaded analyzer. The command line resembles the following:
```bash
python -m urlhaus '{"artifactType":"url","value":"https://bigbadbotnet.invalid",...}'
```
It is up to each analyzer to determine whether the provided input is compatible with that analyzer. This is assisted by the analyzer metadata, as described earlier in this document, with the use of the `supportedTypes` list.
Once the analyzer completes its work, it must terminate promptly. See the following sections for more details on the expected internal behavior of an analyzer.
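At a high level, that behavior resembles the following sketch, patterned after the analyzers bundled with Security Onion; `myanalyzer` is a placeholder name and the lookup itself is stubbed out:
```python
# myanalyzer.py - skeletal analyzer control flow (illustrative only)
import argparse
import json
import os
import helpers  # shared helper functions provided alongside the analyzers


def analyze(conf, input):
    meta = helpers.loadMetadata(__file__)                    # reads myanalyzer.json
    data = helpers.parseArtifact(input)                      # parses the artifact JSON
    helpers.checkSupportedType(meta, data["artifactType"])   # exits 126 if unsupported
    raw = {"observed": data["value"]}                        # stand-in for a real lookup
    return {"response": raw, "status": "info", "summary": "Results found."}


def main():
    dir = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(description='Analyze a given artifact')
    parser.add_argument('artifact', help='the artifact represented in JSON format')
    parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/myanalyzer.yaml", help='optional config file to use instead of the default config file')
    args = parser.parse_args()
    if args.artifact:
        results = analyze(helpers.loadConfig(args.config), args.artifact)
        print(json.dumps(results))


if __name__ == "__main__":
    main()
```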
#### Configuration
Analyzers may need dynamic configuration data, such as credentials or other secrets, in order to complete their function. Optional configuration files can provide this information and are expected to reside in the analyzer's directory. Configuration files are typically written in YAML syntax for ease of modification.
Configuration files for analyzers included with Security Onion will be pillarized, meaning they derive their custom values from the Saltstack pillar data. For example, an analyzer that requires a user-supplied credential might contain a config file resembling the following, where Jinja templating syntax is used to extract Salt pillar data:
```yaml
username: {{ salt['pillar.get']('sensoroni:analyzers:myanalyzer:username', '') }}
password: {{ salt['pillar.get']('sensoroni:analyzers:myanalyzer:password', '') }}
```
Sensoroni will not provide any inputs to the analyzer during execution, other than the artifact input in JSON format. However, developers will likely need to test the analyzer outside of Sensoroni and without Jinja templating; therefore, an alternate config file should normally be supplied as the configuration argument during testing. Analyzers should allow for this additional command line argument, but by default should automatically read a configuration file stored in the analyzer's directory.
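For example, a developer might keep a plain, non-templated config file next to the pillarized one; the `_dev.yaml` suffix and the values below are only illustrative:
```yaml
# myanalyzer_dev.yaml - literal values for local testing only; never commit real secrets
username: testuser
password: testpass
```
The analyzer can then be exercised locally with a command such as `python myanalyzer.py -c myanalyzer_dev.yaml '{"artifactType":"url","value":"https://example.invalid"}'`.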
#### Exit Code
If an analyzer determines it cannot or should not operate on the input then the analyzer should return an exit code of `126`.
If an analyzer does attempt to operate against the input, then the exit code should be `0`, regardless of the outcome. The outcome, be it an error, a confirmed threat detection, or perhaps an unknown result, should be noted in the output of the analyzer.
#### Output
The outcome of the analyzer is reflected in the analyzer's output to `stdout`. The output must be JSON formatted and should contain the following fields.
`summary`: A very short summary of the outcome. This should be under 50 characters; otherwise it will be truncated when displayed in the analyzer job list.
`status`: Can be one of the following status values, which most appropriately reflects the outcome:
- `ok`: The analyzer has concluded that the provided input is not a known threat.
- `info`: This analyzer provides informative data, but does not attempt to conclude the input is a threat.
- `caution`: The data provided is inconclusive. Analysts should review this information further. This can be used in error scenarios, such as if the analyzer fails to complete, perhaps due to a remote service being offline.
- `threat`: The analyzer has detected that the input is likely related to a threat.
`error`: [Optional] If the analyzer encounters an unrecoverable error, those details, useful for administrators to troubleshoot the problem, should be placed in this field.
Additional fields are allowed, and should contain data that is specific to the analyzer.
Below is an example of a _urlhaus_ analyzer output. Note that the urlhaus raw JSON is added to a custom field called "response".
```json
{
"response": {
"blacklists": {
"spamhaus_dbl": "not listed",
"surbl": "not listed"
},
"date_added": "2022-04-07 12:39:14 UTC",
"host": "abeibaba.com",
"id": "2135795",
"larted": "false",
"last_online": null,
"payloads": null,
"query_status": "ok",
"reporter": "switchcert",
"tags": [
"Flubot"
],
"takedown_time_seconds": null,
"threat": "malware_download",
"url": "https://abeibaba.com/ian/?redacted",
"url_status": "offline",
"urlhaus_reference": "https://urlhaus.abuse.ch/url/2135795/"
},
"status": "threat",
"summary": "malware_download"
}
```
Users in SOC will be able to view the entire JSON output; therefore, it is important that sensitive information, such as credentials or other secrets, is excluded from the output.
#### Internationalization
Some of the built-in analyzers use snake_case summary values instead of human-friendly words or phrases. These are identifiers that the SOC UI will use to look up a localized translation for the user. The use of these identifiers is not required for custom analyzers. In fact, in order for an identifier to be properly localized, the translations must exist in the SOC product, which is out of scope of this development guide. That said, the following generic translations might be useful for custom analyzers:
| Identifier | English |
| ------------------ | -------------------------- |
| `malicious` | Malicious |
| `suspicious` | Suspicious |
| `harmless` | Harmless |
| `internal_failure` | Analyzer Internal Failure |
| `timeout` | Remote Host Timed Out |
#### Timeout
It is expected that analyzers will finish quickly, but there is a default timeout in place that will abort the analyzer if the timeout is exceeded. By default that timeout is 15 minutes (900000 milliseconds), but it can be customized via the `sensoroni:analyze_timeout_ms` salt pillar.
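For example, to lower the timeout to 5 minutes, a pillar entry resembling the following could be used (a sketch; adjust to your grid's pillar layout):
```yaml
sensoroni:
  analyze_timeout_ms: 300000
```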
## Contributing
Review the Security Onion project [contribution guidelines](https://github.com/Security-Onion-Solutions/securityonion/blob/master/CONTRIBUTING.md) if you are considering contributing an analyzer to the Security Onion project.
### Procedure
In order to make a custom analyzer into a permanent Security Onion analyzer, the following steps need to be taken:
1. Fork the [securityonion GitHub repository](https://github.com/Security-Onion-Solutions/securityonion)
2. Copy your custom analyzer directory to the forked project, under the `securityonion/salt/sensoroni/files/analyzers` directory.
3. Ensure the contribution requirements in the following section are met.
4. Submit a [pull request](https://github.com/Security-Onion-Solutions/securityonion/pulls) to merge your GitHub fork back into the `securityonion` _dev_ branch.
### Requirements
The following requirements must be satisfied in order for analyzer pull requests to be accepted into the Security Onion GitHub project:
- Analyzer contributions must not contain licensed dependencies or source code that is incompatible with the [GPLv2 licensing](https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html).
- All source code must pass the `flake8` lint check. This ensures source code conforms to the same style guides as the other analyzers. The Security Onion project will automatically run the linter after each push to a `securityonion` repository fork, and again when submitting a pull request. Failed lint checks will result in the submitter being sent an automated email message.
- All source code must include accompanying unit test coverage. The Security Onion project will automatically run the unit tests after each push to a `securityonion` repository fork, and again when submitting a pull request. Failed unit tests, or insufficient unit test coverage, will result in the submitter being sent an automated email message.
- Documentation of the analyzer, its input requirements, conditions for operation, and other relevant information must be clearly written in an accompanying analyzer metadata file. This file is described in more detail earlier in this document.
- Source code must be well-written and be free of security defects that can put users or their data at unnecessary risk.

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Simple CI helper: lint the target directory with flake8, then run pytest
# with coverage enforcement. Usage: ./build.sh [target-dir] (defaults to ".")
HOME_DIR=$(dirname "$0")
TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
  echo "Missing dependencies. Consider running the following command:"
  echo " python -m pip install flake8 pytest pytest-cov"
  exit 1
fi
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
pytest "$TARGET_DIR" "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100

View File

@@ -0,0 +1,7 @@
{
"name": "EmailRep",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries the EmailRep API for email address reputation information",
"supportedTypes" : ["email", "mail"]
}

View File

@@ -0,0 +1,67 @@
import json
import os
import sys
import requests
import helpers
import argparse
def checkConfigRequirements(conf):
if "api_key" not in conf:
sys.exit(126)
else:
return True
def sendReq(conf, meta, email):
url = conf['base_url'] + email
headers = {"Key": conf['api_key']}
response = requests.request('GET', url=url, headers=headers)
return response.json()
def prepareResults(raw):
    if "suspicious" in raw:
        if raw['suspicious'] is True:
            status = "caution"
            summary = "suspicious"
        elif raw['suspicious'] is False:
            status = "ok"
            summary = "harmless"
        else:
            status = "caution"
            summary = "internal_failure"
    elif "status" in raw:
        if raw["reason"] == "invalid email":
            status = "caution"
            summary = "Invalid email address."
        elif "exceeded daily limit" in raw["reason"]:
            status = "caution"
            summary = "Exceeded daily request limit."
        else:
            status = "caution"
            summary = "internal_failure"
    else:
        status = "caution"
        summary = "internal_failure"
    results = {'response': raw, 'summary': summary, 'status': status}
    return results
def analyze(conf, input):
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
response = sendReq(conf, meta, data["value"])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(description='Search EmailRep for a given artifact')
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/emailrep.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,2 @@
base_url: https://emailrep.io/
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:emailrep:api_key', '') }}"

View File

@@ -0,0 +1,85 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from emailrep import emailrep
import unittest
class TestEmailRepMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
emailrep.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('emailrep.emailrep.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
emailrep.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def test_checkConfigRequirements_not_present(self):
conf = {"not_a_file_path": "blahblah"}
with self.assertRaises(SystemExit) as cm:
emailrep.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_sendReq(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
meta = {}
conf = {"base_url": "https://myurl/", "api_key": "abcd1234"}
email = "test@abc.com"
response = emailrep.sendReq(conf=conf, meta=meta, email=email)
mock.assert_called_once_with("GET", headers={"Key": "abcd1234"}, url="https://myurl/test@abc.com")
self.assertIsNotNone(response)
def test_prepareResults_invalidEmail(self):
raw = {"status": "fail", "reason": "invalid email"}
results = emailrep.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Invalid email address.")
self.assertEqual(results["status"], "caution")
def test_prepareResults_not_suspicious(self):
raw = {"email": "notsus@domain.com", "reputation": "high", "suspicious": False, "references": 21, "details": {"blacklisted": False, "malicious_activity": False, "profiles": ["twitter"]}}
results = emailrep.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "harmless")
self.assertEqual(results["status"], "ok")
def test_prepareResults_suspicious(self):
raw = {"email": "sus@domain.com", "reputation": "none", "suspicious": True, "references": 0, "details": {"blacklisted": False, "malicious_activity": False, "profiles": []}}
results = emailrep.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "suspicious")
self.assertEqual(results["status"], "caution")
def test_prepareResults_exceeded_limit(self):
raw = {"status": "fail", "reason": "exceeded daily limit. please wait 24 hrs or visit emailrep.io/key for an api key."}
results = emailrep.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Exceeded daily request limit.")
self.assertEqual(results["status"], "caution")
def test_prepareResults_error(self):
raw = {}
results = emailrep.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "internal_failure")
self.assertEqual(results["status"], "caution")
def test_analyze(self):
output = {"email": "sus@domain.com", "reputation": "none", "suspicious": True, "references": 0, "details": {"blacklisted": False, "malicious_activity": False, "profiles": []}}
artifactInput = '{"value":"sus@domain.com","artifactType":"email"}'
conf = {"base_url": "myurl/", "api_key": "abcd1234"}
with patch('emailrep.emailrep.sendReq', new=MagicMock(return_value=output)) as mock:
results = emailrep.analyze(conf, artifactInput)
self.assertEqual(results["summary"], "suspicious")
mock.assert_called_once()

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

View File

@@ -0,0 +1,7 @@
{
"name": "Greynoise IP Analyzer",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries Greynoise for context around an IP address",
"supportedTypes" : ["ip"]
}

View File

@@ -0,0 +1,78 @@
import json
import os
import sys
import requests
import helpers
import argparse
def checkConfigRequirements(conf):
if "api_key" not in conf or len(conf['api_key']) == 0:
sys.exit(126)
else:
return True
def sendReq(conf, meta, ip):
url = conf['base_url']
if conf['api_version'] == 'community':
url = url + 'v3/community/' + ip
    elif conf['api_version'] in ('investigate', 'automate'):
        url = url + 'v2/noise/context/' + ip
headers = {"key": conf['api_key']}
response = requests.request('GET', url=url, headers=headers)
return response.json()
def prepareResults(raw):
if "message" in raw:
if "Success" in raw["message"]:
if "classification" in raw:
if "benign" in raw['classification']:
status = "ok"
summary = "harmless"
elif "malicious" in raw['classification']:
status = "threat"
summary = "malicious"
elif "unknown" in raw['classification']:
status = "caution"
summary = "Results found."
elif "IP not observed scanning the internet or contained in RIOT data set." in raw["message"]:
status = "ok"
summary = "no_results"
elif "Request is not a valid routable IPv4 address" in raw["message"]:
status = "caution"
summary = "Invalid IP address."
else:
status = "info"
summary = raw["message"]
else:
status = "caution"
summary = "internal_failure"
results = {'response': raw, 'summary': summary, 'status': status}
return results
def analyze(conf, input):
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
response = sendReq(conf, meta, data["value"])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description='Search Greynoise for a given artifact')
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/greynoise.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,3 @@
base_url: https://api.greynoise.io/
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:greynoise:api_key', '') }}"
api_version: "{{ salt['pillar.get']('sensoroni:analyzers:greynoise:api_version', 'community') }}"

View File

@@ -0,0 +1,117 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from greynoise import greynoise
import unittest
class TestGreynoiseMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
greynoise.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('greynoise.greynoise.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
greynoise.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def test_checkConfigRequirements_not_present(self):
conf = {"not_a_file_path": "blahblah"}
with self.assertRaises(SystemExit) as cm:
greynoise.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_sendReq_community(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
meta = {}
conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "community"}
ip = "192.168.1.1"
response = greynoise.sendReq(conf=conf, meta=meta, ip=ip)
mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v3/community/192.168.1.1")
self.assertIsNotNone(response)
def test_sendReq_investigate(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
meta = {}
conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "investigate"}
ip = "192.168.1.1"
response = greynoise.sendReq(conf=conf, meta=meta, ip=ip)
mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v2/noise/context/192.168.1.1")
self.assertIsNotNone(response)
def test_sendReq_automate(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
meta = {}
conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "automate"}
ip = "192.168.1.1"
response = greynoise.sendReq(conf=conf, meta=meta, ip=ip)
mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v2/noise/context/192.168.1.1")
self.assertIsNotNone(response)
def test_prepareResults_invalidIP(self):
raw = {"message": "Request is not a valid routable IPv4 address"}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Invalid IP address.")
self.assertEqual(results["status"], "caution")
def test_prepareResults_not_found(self):
raw = {"ip": "192.190.1.1", "noise": "false", "riot": "false", "message": "IP not observed scanning the internet or contained in RIOT data set."}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "no_results")
self.assertEqual(results["status"], "ok")
def test_prepareResults_benign(self):
raw = {"ip": "8.8.8.8", "noise": "false", "riot": "true", "classification": "benign", "name": "Google Public DNS", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "harmless")
self.assertEqual(results["status"], "ok")
def test_prepareResults_malicious(self):
raw = {"ip": "121.142.87.218", "noise": "true", "riot": "false", "classification": "malicious", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "malicious")
self.assertEqual(results["status"], "threat")
def test_prepareResults_unknown(self):
raw = {"ip": "221.4.62.149", "noise": "true", "riot": "false", "classification": "unknown", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Results found.")
self.assertEqual(results["status"], "caution")
def test_prepareResults_unknown_message(self):
raw = {"message": "unknown"}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "unknown")
self.assertEqual(results["status"], "info")
def test_prepareResults_error(self):
raw = {}
results = greynoise.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "internal_failure")
self.assertEqual(results["status"], "caution")
def test_analyze(self):
output = {"ip": "221.4.62.149", "noise": "true", "riot": "false", "classification": "unknown", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"}
artifactInput = '{"value":"221.4.62.149","artifactType":"ip"}'
conf = {"base_url": "myurl/", "api_key": "abcd1234", "api_version": "community"}
with patch('greynoise.greynoise.sendReq', new=MagicMock(return_value=output)) as mock:
results = greynoise.analyze(conf, artifactInput)
self.assertEqual(results["summary"], "Results found.")
mock.assert_called_once()

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

View File

@@ -0,0 +1,28 @@
import json
import os
import sys
import yaml
def checkSupportedType(meta, artifact_type):
    # Exit with code 126 if this analyzer does not support the given artifact type
    if artifact_type not in meta['supportedTypes']:
        sys.exit(126)
    else:
        return True
def parseArtifact(artifact):
    # Parse the JSON-formatted artifact provided on the command line
    data = json.loads(artifact)
    return data
def loadMetadata(file):
    # Load the analyzer's metadata file (<analyzer>.json) from the analyzer's directory
    dir = os.path.dirname(os.path.realpath(file))
    filename = os.path.realpath(file).rsplit('/', 1)[1].split('.')[0]
    with open(str(dir + "/" + filename + ".json"), "r") as metafile:
        return json.load(metafile)
def loadConfig(path):
    # Load a YAML configuration file from the given path
    with open(str(path), "r") as conffile:
        return yaml.safe_load(conffile)

View File

@@ -0,0 +1,35 @@
from unittest.mock import patch, MagicMock
import helpers
import os
import unittest
class TestHelpersMethods(unittest.TestCase):
def test_checkSupportedType(self):
with patch('sys.exit', new=MagicMock()) as mock:
meta = {"supportedTypes": ["ip", "foo"]}
result = helpers.checkSupportedType(meta, "ip")
self.assertTrue(result)
mock.assert_not_called()
result = helpers.checkSupportedType(meta, "bar")
self.assertFalse(result)
mock.assert_called_once_with(126)
def test_loadMetadata(self):
dir = os.path.dirname(os.path.realpath(__file__))
input = dir + '/urlhaus/urlhaus.py'
data = helpers.loadMetadata(input)
self.assertEqual(data["name"], "Urlhaus")
def test_loadConfig(self):
dir = os.path.dirname(os.path.realpath(__file__))
data = helpers.loadConfig(dir + "/virustotal/virustotal.yaml")
self.assertEqual(data["base_url"], "https://www.virustotal.com/api/v3/search?query=")
def test_parseArtifact(self):
input = '{"value":"foo","artifactType":"bar"}'
data = helpers.parseArtifact(input)
self.assertEqual(data["artifactType"], "bar")
self.assertEqual(data["value"], "foo")

View File

@@ -0,0 +1,7 @@
{
"name": "JA3er Hash Search",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries JA3er user agents and sightings",
"supportedTypes" : ["ja3"]
}

View File

@@ -0,0 +1,53 @@
import json
import os
import requests
import helpers
import argparse
def sendReq(conf, meta, hash):
url = conf['base_url'] + hash
response = requests.request('GET', url)
return response.json()
def prepareResults(raw):
if "error" in raw:
if "Sorry" in raw["error"]:
status = "ok"
summary = "No results found."
elif "Invalid hash" in raw["error"]:
status = "caution"
summary = "Invalid hash."
else:
status = "caution"
summary = "internal_failure"
else:
status = "info"
summary = "Results found."
results = {'response': raw, 'summary': summary, 'status': status}
return results
def analyze(conf, input):
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
response = sendReq(conf, meta, data["value"])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description='Search JA3er for a given artifact')
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/ja3er.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1 @@
base_url: https://ja3er.com/search/

View File

@@ -0,0 +1,65 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from ja3er import ja3er
import unittest
class TestJa3erMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
ja3er.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('ja3er.ja3er.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
ja3er.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def test_sendReq(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
meta = {}
conf = {"base_url": "myurl/"}
hash = "abcd1234"
response = ja3er.sendReq(conf=conf, meta=meta, hash=hash)
mock.assert_called_once_with("GET", "myurl/abcd1234")
self.assertIsNotNone(response)
def test_prepareResults_none(self):
raw = {"error": "Sorry no values found"}
results = ja3er.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "No results found.")
self.assertEqual(results["status"], "ok")
def test_prepareResults_invalidHash(self):
raw = {"error": "Invalid hash"}
results = ja3er.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Invalid hash.")
self.assertEqual(results["status"], "caution")
def test_prepareResults_info(self):
raw = [{"User-Agent": "Blah/5.0", "Count": 24874, "Last_seen": "2022-04-08 16:18:38"}, {"Comment": "Brave browser v1.36.122\n\n", "Reported": "2022-03-28 20:26:42"}]
results = ja3er.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Results found.")
self.assertEqual(results["status"], "info")
def test_analyze(self):
output = {"info": "Results found."}
artifactInput = '{"value":"abcd1234","artifactType":"ja3"}'
conf = {"base_url": "myurl/"}
with patch('ja3er.ja3er.sendReq', new=MagicMock(return_value=output)) as mock:
results = ja3er.analyze(conf, artifactInput)
self.assertEqual(results["summary"], "Results found.")
mock.assert_called_once()

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

View File

@@ -0,0 +1,7 @@
{
"name": "Local File Analyzer",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries one or more local CSV files for a value, then returns all columns within matching rows.",
"supportedTypes" : ["domain", "hash", "ip", "other", "url"]
}

View File

@@ -0,0 +1,79 @@
import json
import helpers
import os
import sys
import argparse
import csv
def checkConfigRequirements(conf):
if "file_path" not in conf or len(conf['file_path']) == 0:
sys.exit(126)
else:
return True
def searchFile(artifact, csvfiles):
dir = os.path.dirname(os.path.realpath(__file__))
found = []
for f in csvfiles:
filename = dir + "/" + f
with open(filename, "r") as csvfile:
csvdata = csv.DictReader(csvfile)
for row in csvdata:
first_key = list(row.keys())[0]
if artifact in row[first_key]:
row.update({"filename": filename})
found.append(row)
if len(found) != 0:
if len(found) == 1:
results = found[0]
else:
results = found
else:
results = "No results"
return results
def prepareResults(raw):
if len(raw) > 0:
if "No results" in raw:
status = "ok"
summary = "no_results"
else:
status = "info"
summary = "One or more matches found."
else:
raw = {}
status = "caution"
summary = "internal_failure"
response = raw
results = {'response': response, 'status': status, 'summary': summary}
return results
def analyze(conf, input):
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
search = searchFile(data["value"], conf['file_path'])
results = prepareResults(search)
return results
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description='Search CSV file for a given artifact')
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/localfile.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1 @@
file_path: []

View File

@@ -0,0 +1,4 @@
indicator,description,reference
abcd1234,This is a test!,Testing
abcd1234,This is another test!,Testing
192.168.1.1,Yet another test!,Testing

View File

@@ -0,0 +1,119 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from localfile import localfile
import unittest
class TestLocalfileMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
localfile.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('localfile.localfile.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
localfile.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def test_checkConfigRequirements_present(self):
conf = {"file_path": "['intel.csv']"}
self.assertTrue(localfile.checkConfigRequirements(conf))
def test_checkConfigRequirements_not_present(self):
conf = {"not_a_file_path": "blahblah"}
with self.assertRaises(SystemExit) as cm:
localfile.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_checkConfigRequirements_empty(self):
conf = {"file_path": ""}
with self.assertRaises(SystemExit) as cm:
localfile.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_searchFile_multiple_found(self):
artifact = "abcd1234"
results = localfile.searchFile(artifact, ["localfile_test.csv"])
self.assertEqual(results[0]["indicator"], "abcd1234")
self.assertEqual(results[0]["description"], "This is a test!")
self.assertEqual(results[0]["reference"], "Testing")
self.assertEqual(results[1]["indicator"], "abcd1234")
self.assertEqual(results[1]["description"], "This is another test!")
def test_searchFile_single_found(self):
artifact = "192.168.1.1"
results = localfile.searchFile(artifact, ["localfile_test.csv"])
self.assertEqual(results["indicator"], "192.168.1.1")
self.assertEqual(results["description"], "Yet another test!")
self.assertEqual(results["reference"], "Testing")
def test_searchFile_not_found(self):
artifact = "youcan'tfindme"
results = localfile.searchFile(artifact, ["localfile_test.csv"])
self.assertEqual(results, "No results")
def test_prepareResults_none(self):
raw = "No results"
results = localfile.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "no_results")
self.assertEqual(results["status"], "ok")
def test_prepareResults_ok(self):
raw = [
{
"description": "This is one BAD piece of malware!",
"filename": "/opt/sensoroni/analyzers/localfile/intel.csv",
"indicator": "abc1234",
"reference": "https://myintelservice"
},
{
"filename": "/opt/sensoroni/analyzers/localfile/random.csv",
"randomcol1": "myothervalue",
"randomcol2": "myotherothervalue",
"value": "abc1234"
}
]
results = localfile.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "One or more matches found.")
self.assertEqual(results["status"], "info")
def test_prepareResults_error(self):
raw = {}
results = localfile.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "internal_failure")
self.assertEqual(results["status"], "caution")
def test_analyze(self):
output = [
{
"description": "This is one BAD piece of malware!",
"filename": "/opt/sensoroni/analyzers/localfile/intel.csv",
"indicator": "abc1234",
"reference": "https://myintelservice"
},
{
"filename": "/opt/sensoroni/analyzers/localfile/random.csv",
"randomcol1": "myothervalue",
"randomcol2": "myotherothervalue",
"value": "abc1234"
}
]
artifactInput = '{"value":"foo","artifactType":"url"}'
conf = {"file_path": "/home/intel.csv"}
with patch('localfile.localfile.searchFile', new=MagicMock(return_value=output)) as mock:
results = localfile.analyze(conf, artifactInput)
self.assertEqual(results["summary"], "One or more matches found.")
mock.assert_called_once()

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

View File

@@ -0,0 +1,7 @@
{
"name": "Alienvault OTX",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries Alienvault OTX for a domain, hash, IP, or URL, then returns a report for it.",
"supportedTypes" : ["domain", "hash", "ip", "url"]
}

View File

@@ -0,0 +1,88 @@
import json
import requests
import helpers
import sys
import os
import argparse
def buildReq(conf, artifact_type, artifact_value):
headers = {"X-OTX-API-KEY": conf["api_key"]}
base_url = conf['base_url']
if artifact_type == "ip":
uri = "indicators/IPv4/"
elif artifact_type == "url":
uri = "indicators/url/"
elif artifact_type == "domain":
uri = "indicators/domain/"
elif artifact_type == "hash":
uri = "indicators/file/"
section = "/general"
url = base_url + uri + artifact_value + section
return url, headers
def checkConfigRequirements(conf):
if "api_key" not in conf or len(conf['api_key']) == 0:
sys.exit(126)
else:
return True
def sendReq(url, headers):
response = requests.request('GET', url, headers=headers)
return response.json()
def prepareResults(response):
if len(response) != 0:
raw = response
if 'reputation' in raw:
reputation = raw["reputation"]
if reputation == 0:
status = "ok"
summaryinfo = "harmless"
elif reputation > 0 and reputation < 50:
status = "ok"
summaryinfo = "Likely Harmless"
elif reputation >= 50 and reputation < 75:
status = "caution"
summaryinfo = "suspicious"
            elif reputation >= 75:
status = "threat"
summaryinfo = "malicious"
else:
status = "info"
summaryinfo = "Analysis complete."
else:
raw = {}
status = "caution"
summaryinfo = "internal_failure"
results = {'response': raw, 'status': status, 'summary': summaryinfo}
return results
def analyze(conf, input):
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
request = buildReq(conf, data["artifactType"], data["value"])
response = sendReq(request[0], request[1])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description='Search Alienvault OTX for a given artifact')
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/otx.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,2 @@
base_url: https://otx.alienvault.com/api/v1/
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:otx:api_key', '') }}"

View File

@@ -0,0 +1,250 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from otx import otx
import unittest
class TestOtxMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
otx.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('otx.otx.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
otx.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def checkConfigRequirements(self):
conf = {"not_a_key": "abcd12345"}
with self.assertRaises(SystemExit) as cm:
otx.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_buildReq_domain(self):
conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'}
artifact_type = "domain"
artifact_value = "abc.com"
result = otx.buildReq(conf, artifact_type, artifact_value)
self.assertEqual("https://myurl/indicators/domain/abc.com/general", result[0])
self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1])
def test_buildReq_hash(self):
conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'}
artifact_type = "hash"
artifact_value = "abcd1234"
result = otx.buildReq(conf, artifact_type, artifact_value)
self.assertEqual("https://myurl/indicators/file/abcd1234/general", result[0])
self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1])
def test_buildReq_ip(self):
conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'}
artifact_type = "ip"
artifact_value = "192.168.1.1"
result = otx.buildReq(conf, artifact_type, artifact_value)
self.assertEqual("https://myurl/indicators/IPv4/192.168.1.1/general", result[0])
self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1])
def test_buildReq_url(self):
conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'}
artifact_type = "url"
artifact_value = "https://abc.com"
result = otx.buildReq(conf, artifact_type, artifact_value)
self.assertEqual("https://myurl/indicators/url/https://abc.com/general", result[0])
self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1])
def test_sendReq(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
url = "https://myurl="
response = otx.sendReq(url, headers={"x-apikey": "xyz"})
mock.assert_called_once_with("GET", "https://myurl=", headers={"x-apikey": "xyz"})
self.assertIsNotNone(response)
def test_prepareResults_harmless(self):
raw = {
"whois": "http://whois.domaintools.com/192.168.1.1",
"reputation": 0,
"indicator": "192.168.1.1",
"type": "IPv4",
"pulse_info": {
"count": 0,
"pulses": [],
"related": {
"alienvault": {
"adversary": [],
"malware_families": []
}
}
},
"false_positive": [],
"sections": [
"general"
]
}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "harmless")
self.assertEqual(results["status"], "ok")
def test_prepareResults_likely_harmless(self):
raw = {
"whois": "http://whois.domaintools.com/192.168.1.1",
"reputation": 49,
"indicator": "192.168.1.1",
"type": "IPv4",
"pulse_info": {
"count": 0,
"pulses": [],
"related": {
"alienvault": {
"adversary": [],
"malware_families": []
}
}
},
"false_positive": [],
"sections": [
"general"
]
}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Likely Harmless")
self.assertEqual(results["status"], "ok")
def test_prepareResults_suspicious(self):
raw = {
"whois": "http://whois.domaintools.com/192.168.1.1",
"reputation": 50,
"indicator": "192.168.1.1",
"type": "IPv4",
"pulse_info": {
"count": 0,
"pulses": [],
"related": {
"alienvault": {
"adversary": [],
"malware_families": []
}
}
},
"false_positive": [],
"sections": [
"general"
]
}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "suspicious")
self.assertEqual(results["status"], "caution")
def test_prepareResults_threat(self):
raw = {
"whois": "http://whois.domaintools.com/192.168.1.1",
"reputation": 75,
"indicator": "192.168.1.1",
"type": "IPv4",
"pulse_info": {
"count": 0,
"pulses": [],
"related": {
"alienvault": {
"adversary": [],
"malware_families": []
}
}
},
"false_positive": [],
"sections": [
"general"
]
}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "malicious")
self.assertEqual(results["status"], "threat")
def test_prepareResults_undetermined(self):
raw = {
"alexa": "",
"base_indicator": {},
"domain": "Unavailable",
"false_positive": [],
"hostname": "Unavailable",
"indicator": "http://192.168.1.1",
"pulse_info": {
"count": 0,
"pulses": [],
"references": [],
"related": {
"alienvault": {
"adversary": [],
"industries": [],
"malware_families": [],
"unique_indicators": 0
},
"other": {
"adversary": [],
"industries": [],
"malware_families": [],
"unique_indicators": 0
}
}
},
"sections": [
"general"
],
"type": "url",
"type_title": "URL",
"validation": []
}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "Analysis complete.")
self.assertEqual(results["status"], "info")
def test_prepareResults_error(self):
raw = {}
results = otx.prepareResults(raw)
self.assertEqual(results["response"], raw)
self.assertEqual(results["summary"], "internal_failure")
self.assertEqual(results["status"], "caution")
def test_analyze(self):
output = {
"whois": "http://whois.domaintools.com/192.168.1.1",
"reputation": 0,
"indicator": "192.168.1.1",
"type": "IPv4",
"pulse_info": {
"count": 0,
"pulses": [],
"related": {
"alienvault": {
"adversary": [],
"malware_families": []
}
}
},
"false_positive": [],
"sections": [
"general"
]
}
artifactInput = '{"value":"192.168.1.1","artifactType":"ip"}'
conf = {"base_url": "https://myurl/", "api_key": "xyz"}
with patch('otx.otx.sendReq', new=MagicMock(return_value=output)) as mock:
results = otx.analyze(conf, artifactInput)
self.assertEqual(results["summary"], "harmless")
mock.assert_called_once()

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

Some files were not shown because too many files have changed in this diff Show More