Merge branch 'dev' into feature/nginx-update

# Conflicts:
#	salt/nginx/etc/nginx.conf
#	salt/nginx/etc/nginx.conf.so-eval
#	salt/nginx/etc/nginx.conf.so-manager
#	salt/nginx/etc/nginx.conf.so-standalone
This commit is contained in:
William Wernert
2020-10-19 13:25:46 -04:00
265 changed files with 25261 additions and 9355 deletions

51
KEYS Normal file

@@ -0,0 +1,51 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF7rzwEBEADBg87uJhnC3Ls7s60hbHGaywGrPtbz2WuYA/ev3YS3X7WS75p8
PGlzTWUCujx0pEHbK2vYfExl3zksZ8ZmLyZ9VB3oSLiWBzJgKAeB7YCFEo8te+eE
P2Z+8c+kX4eOV+2waxZyewA2TipSkhWgStSI4Ow8SyVUcUWA3hCw7mo2duNVi7KO
C3vvI3wzirH+8/XIGo+lWTg6yYlSxdf+0xWzYvV2QCMpwzJfARw6GGXtfCZw/zoO
o4+YPsiyztQdyI1y+g3Fbesl65E36DelbyP+lYd2VecX8ELEv0wlKCgHYlk6lc+n
qnOotVjWbsyXuFfo06PHUd6O9n3nmo0drC6kmXGw1e8hu0t8VcGfMTKS/hszwVUY
bHS6kbfsOoAb6LXPWKfqxk/BdreLXmcHHz88DimS3OS0JufkcmkjxEzSFRL0kb2h
QVb1SATrbx+v2RWQXvi9sLCjT2fdOiwi1Tgc84orc7A1C3Jwu353YaX9cV+n5uyG
OZ2AULZ5z2h13sVuiZAwfyyFs/O0CJ783hFA2TNPnyNGAgw/kaIo7nNRnggtndBo
oQzVS+BHiFx98IF4zDqmF2r2+jOCjxSrw8KnZBe4bgXFtl89DmjoejGvWDnu2MVM
pZDEs1DcOxHBQmTCWMIYLyNKG0xW6diyWBxEIaa7YgrP6kA+RaDfZ/xXPwARAQAB
tD9TZWN1cml0eSBPbmlvbiBTb2x1dGlvbnMsIExMQyA8aW5mb0BzZWN1cml0eW9u
aW9uc29sdXRpb25zLmNvbT6JAlQEEwEKAD4WIQTIBKk9Nr4Mcz6hlkR8EGC3/lBw
EwUCXuvPAQIbAwUJEswDAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRB8EGC3
/lBwExB1D/42xIDGU2XFNFyTU+ZqzDA8qNC9hEKjLeizbeM8RIm3xO+3p7SdqbuJ
7pA8gk0RiHuILb+Ba1xiSh/w/W2bOxQhsXuWHih2z3W1tI+hu6RQhIm4e6CIHHf7
Vzj4RSvHOVS0AzITUwkHjv0x0Z8zVBPJfEHKkK2x03BqP1o12rd7n2ZMrSfN6sED
fUwOJLDjthShtyLSPBVG8j7T5cfSCPSLhfVOKPQVcI1sSir7RLeyxt1v1kzjQdaA
+znxO8EgfZJN93wzfBrAGcVT8KmpmgwR6p46m20wJXyZC9DZxJ0o1y3toVWTC+kP
Qj1ROPivySVn10rBoOJk8HteyhW07gTcydq+noKHV7SqJ1899xRAYP7rDCfI9iMW
Nn22ZDLnAkIcbNR7JLJCHwsZH/Umo9KO/dIccIqVQel3UCCYZcWTZW0VkcjqVKRa
eK+JQGaJPrBAoxIG5/sMlbk2sINSubNWlcbH6kM0V8NVwdPiOO9xLmp2hI4ICxE3
M+O2HCNX4QYzVizzTFxEvW3ieLa4nePQ8J6lvMI2oLkFP7xHoFluvZnuwfNvoEy0
RnlHExN1UQTUvcbCxIbzjaJ4HJXilWHjgmGaVQO1S7AYskWnNWQ7uJvxnuZBNNwm
pIvwYEZp23fYaWl/xKqnmPMy2ADjROBKlCm7L+Ntq1r7ELGW5ZCTobkCDQRe688B
ARAA22GzdkSAo+mwJ2S1RbJ1G20tFnLsG/NC8iMN3lEh/PSmyPdB7mBtjZ+HPDzF
VSznXZdr3LItBBQOli2hVIj1lZBY7+s2ZufV3TFFwselUwT3b1g1KMkopD95Ckf8
WhLbSz2yqgrvcEvbB0HFX/ZEsHGqIz2kLacixjwXXLWOMQ2LNbeW1f5zQkBnaNNQ
/4njzTj68OxnvfplNYNJqi2pZGb2UqarYX04FqKNuocN8E7AC9FQdBXylmVctw9T
pQVwfCI76bTe6vPWb+keb6UNN1jyXVnhIQ3Fv5sFBsmgXf/hO8tqCotrKjEiK2/i
RkvFeqsGMXreCgYg9zW4k+DcJtVa+Q8juGOjElrubY3Ua9mCusx3vY4QYSWxQ5Ih
k1lXiUcM5Rt38lfpKHRJ5Pd4Y5xlWSQfZ7nmzbf/GzJQz+rWrA0X6Oc6cDOPLNXK
w1dAygre4f2bsp5kHQt6NMefxeNTDmi+4R62K0tb40f5q0Vxz8qdyD48bBsbULNx
kb6mjOAD+FNkfNXcGeuTq9oRnjx8i93mhYsIP5LFNDXS/zSP1nv0ZUFeIlGQGjV9
1wOvT454qkI9sKiVFtd4FrNKZJbKszxxDm+DPfB5j+hRC4oeEJ7w+sVyh3EawtfM
V7Mwj8i+7c3YUCravXBhSwG7SCTggFUgA8lMr8oWVgCATYsAEQEAAYkCPAQYAQoA
JhYhBMgEqT02vgxzPqGWRHwQYLf+UHATBQJe688BAhsMBQkSzAMAAAoJEHwQYLf+
UHATTtwQAJiztPW68ykifpFdwYFp1VC7c+uGLhWBqjDY9NSUKNC9caR7bV0cnNu8
07UG6j18gCB2GSkukXjOR/oTj6rNcW/WouPYfQOrw7+M2Ya8M8iq+E/HOXaXB3b4
FeCcB0UuwfcHHd2KbXrRHA+9GNpmuOcfTCdsPpIr41Xg4QltATDEt/FrzuKspXg4
vUKDXgfnbj7y0JcJM2FfcwWGlnAG5MMRyjJQAleGdiidX/9WxgJ4Mweq4qJM0jr3
Qsrc9VuzxsLr85no3Hn5UYVgT7bBZ59HUbQoi775m78MxN3mWUSdcyLQKovI+YXr
tshTxWIf/2Ovdzt6Wq1WWXOGGuK1qgdPJTFWrlh3amFdb70zR1p6A/Lthd7Zty+n
QjRZRQo5jBSnYtjhMrZP6rxM3QqnQ0frEKK9HfDYONk1Bw18CUtdwFGb9OMregLR
IjvNLp9coSh5yYAepZyUGEPRET0GsmVw2trQF0uyMSkQfiq2zjPto6WWbsmrrbLr
cfZ/wnBw1FoNEd51U54euo9yvOgOVtJGvqLgHNwB8574FhQhoWAMhyizqdgeEt26
m3FXecUNKL/AK71/l04vor+/WsXe8uhDg3O84qeYa9wgd8LZZVmGZJDosSwqYjtb
LdNNm+v60Zo6rFWSREegqi/nRTTDdxdW99ybjlh+mpbq3xavyFXF
=bhkm
-----END PGP PUBLIC KEY BLOCK-----

166
README.md

@@ -1,170 +1,28 @@
## Security Onion 2.0.0.rc1
## Security Onion 2.3.0
Security Onion 2.0.0 RC1 is here! This version requires a fresh install, but there is good news - we have brought back soup! From now on, you should be able to run soup on the manager to upgrade your environment to RC2 and beyond!
### Changes:
- Re-branded 2.0 to give it a fresh look
- All documentation has moved to our [docs site](https://docs.securityonion.net/en/2.0)
- soup is alive! Note: This tool only updates Security Onion components. Please use the built-in OS update process to keep the OS and other components up to date.
- so-import-pcap is back! See the docs [here](http://docs.securityonion.net/en/2.0/so-import-pcap).
- Fixed issue with so-features-enable
- Users can now pivot to PCAP from Suricata alerts
- ISO install now prompts users to create an admin/sudo user instead of using a default account name
- The web email & password set during setup are now used to create the initial accounts for TheHive, Cortex, and Fleet
- Fixed issue with disk cleanup
- Changed the default permissions for /opt/so to keep non-privileged users from accessing salt and related files
- Locked down access to certain SSL keys
- Suricata logs now compress after they roll over
- Users can now easily customize shard counts per index
- Improved Elastic ingest parsers, including Windows event logs and Sysmon logs shipped with Winlogbeat and Osquery (ECS)
- Elastic nodes are now "hot" by default, making it easier to add a warm node later
- so-allow now runs at the end of an install so users can enable access right away
- Alert severities across Wazuh, Suricata and Playbook (Sigma) have been standardized and copied to `event.severity`:
- 1-Low / 2-Medium / 3-High / 4-Critical
- Initial implementation of alerting queues:
- Low & Medium alerts are accessible through Kibana & Hunt
- High & Critical alerts are accessible through Kibana, Hunt and sent to TheHive for immediate analysis
- ATT&CK Navigator is now a statically-hosted site in the nginx container
- Playbook
- All Sigma rules in the community repo (500+) are now imported and kept up to date
- Initial implementation of automated testing when a Play's detection logic has been edited (i.e., Unit Testing)
- Updated UI Theme
- Once authenticated through SOC, users can now access Playbook with analyst permissions without having to log in again
- Kolide Launcher has been updated to include the ability to pass arbitrary flags - new functionality sponsored by SOS
- Fixed issue with Wazuh authd registration service port not being correctly exposed
- Added option for exposure of Elasticsearch REST API (port 9200) to so-allow for easier external querying/integration with other tools
- Added option to so-allow for external Strelka file uploads (e.g., via `strelka-fileshot`)
- Added default YARA rules for Strelka -- default rules are maintained by Florian Roth and pulled from https://github.com/Neo23x0/signature-base
- Added the ability to use custom Zeek scripts
- Renamed "master server" to "manager node"
- Improved unification of Zeek and Strelka file data
## Hybrid Hunter Beta 1.4.1 - Beta 3
- Fix install script to handle hostnames properly.
Security Onion 2.3.0 is here!
## Hybrid Hunter Beta 1.4.0 - Beta 3
### Release Notes
- Complete overhaul of the way we handle custom and default settings and data. You will now see a default and local directory under the saltstack directory. All customizations are stored in local.
- The way firewall rules are handled has been completely revamped. This allows users to customize firewall rules much more easily.
- Users can now change their own password in SOC.
- Hunt now allows users to enable auto-hunt. This is a toggle which, when enabled, automatically submits a new hunt when filtering, grouping, etc.
- Title bar now reflects current Hunt query. This will assist users in locating a previous query from their browser history.
- Zeek 3.0.7
- Elastic 7.7.1
- Suricata can now be used for metadata generation.
- Suricata eve.json has been moved to `/nsm` to align with storage of other data.
- Suricata will now properly rotate its logs.
- Grafana dashboards now work properly in standalone mode.
- Kibana dashboard updates, including osquery and community_id.
- New Elasticsearch Ingest processor to generate community_id from any log that includes the required fields.
- community_id is now generated for additional logs: Zeek HTTP/SMTP/, and Sysmon shipped with Osquery or Winlogbeat.
- Major streamlining of Fleet setup & configuration - no need to run a secondary setup script anymore.
- Fleet Standalone node now includes the ability to set a FQDN to point osquery endpoints to.
- Distributed installs now support ingesting Windows event logs via Winlogbeat, including full parsing support for Sysmon.
- SOC Downloads section now includes a link to the supported version of Winlogbeat.
- Basic syslog ingestion capability now included.
- Elasticsearch index name transition fixes for various components.
- Updated URLs for pivot fields in Kibana.
- Instances of `hive` renamed to `thehive`.
### Known Issues:
- The Hunt feature is currently considered "Preview" and although very useful in its current state, not everything works. We wanted to get this out as soon as possible to get the feedback from you! Let us know what you want to see! Let us know what you think we should call it!
- You cannot pivot to PCAP from Suricata alerts in Kibana or Hunt.
- Navigator is currently not working when using hostname to access SOC. IP mode works correctly.
- Due to the move to ECS, the current Playbook plays may not alert correctly at this time.
- The osquery MacOS package does not install correctly.
## Hybrid Hunter Beta 1.3.0 - Beta 2
### Changes:
- New Feature: Codename: "Onion Hunt". Select Hunt from the menu and start hunting down your adversaries!
- Improved ECS support.
- Complete refactor of the setup to make it easier to follow.
- Improved setup script logging to better assist with any issues.
- Setup now checks for minimal requirements during install.
- Updated Cyberchef to version 9.20.3.
- Updated Elastalert to version 0.2.4 and switched to alpine to reduce container size.
- Updated Redis to 5.0.9 and switched to alpine to reduce container size.
- Updated Salt to 2019.2.5
- Updated Grafana to 6.7.3.
- Zeek 3.0.6
- Suricata 4.1.8
- Fixed so-status to display the correct containers and status.
- local.zeek is now controlled by a pillar instead of modifying the file directly.
- Renamed so-core to so-nginx and switched to alpine to reduce container size.
- Playbook now uses MySQL instead of SQLite.
- Sigma rules have all been updated.
- Kibana dashboard improvements for ECS.
- Fixed an issue where geoip was not properly parsed.
- ATT&CK Navigator is now its own state.
- Standalone mode is now supported.
- Managersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Manager node and Search node dashboards.
### Known Issues:
- The Hunt feature is currently considered "Preview" and although very useful in its current state, not everything works. We wanted to get this out as soon as possible to get the feedback from you! Let us know what you want to see! Let us know what you think we should call it!
- You cannot pivot to PCAP from Suricata alerts in Kibana or Hunt.
- Updating users via the SOC UI is known to fail. To change a user, delete the user and re-add them.
- Due to the move to ECS, the current Playbook plays may not alert correctly at this time.
- The osquery MacOS package does not install correctly.
### Warnings and Disclaimers
- This BETA release is BLEEDING EDGE and TOTALLY UNSUPPORTED!
- If this breaks your system, you get to keep both pieces!
- This script is a work in progress and is in constant flux.
- This script is intended to build a quick prototype proof of concept so you can see what our new platform might look like. This configuration will change drastically over time leading up to the final release.
- Do NOT run this on a system that you care about!
- Do NOT run this on a system that has data that you care about!
- This script should only be run on a TEST box with TEST data!
- Use of this script may result in nausea, vomiting, or a burning sensation.
https://docs.securityonion.net/en/2.3/release-notes.html
### Requirements
Evaluation Mode:
https://docs.securityonion.net/en/2.3/hardware.html
- ISO or a Single VM running Ubuntu 18.04 or CentOS 7
- Minimum 12GB of RAM
- Minimum 4 CPU cores
- Minimum 2 NICs
### Download
Distributed:
- 3 VMs running the ISO or Ubuntu 18.04 or CentOS 7 (You can mix and match)
- Minimum 8GB of RAM per VM
- Minimum 4 CPU cores per VM
- Minimum 2 NICs for forward nodes
https://docs.securityonion.net/en/2.3/download.html
### Installation
For most users, we recommend installing using [our ISO image](https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/ISO).
If instead you would like to try a manual installation (not using our ISO), you can build from CentOS 7 or Ubuntu 18.04.
If using CentOS 7 Minimal, you will need to install git:
```sudo yum -y install git```
Once you have git, then do the following:
```
git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack
cd securityonion-saltstack
sudo bash so-setup-network
```
Follow the prompts and reboot if asked to do so.
Then proceed to the [Hybrid Hunter Quick Start Guide](https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/Hybrid-Hunter-Quick-Start-Guide).
https://docs.securityonion.net/en/2.3/installation.html
### FAQ
See the [FAQ](https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/FAQ) on the Hybrid Hunter wiki.
https://docs.securityonion.net/en/2.3/faq.html
### Feedback
If you have questions, problems, or other feedback regarding Hybrid Hunter, please post to our subreddit and prefix the title with **[Hybrid Hunter]**:<br>
https://www.reddit.com/r/securityonion/
https://docs.securityonion.net/en/2.3/community-support.html

50
VERIFY_ISO.md Normal file

@@ -0,0 +1,50 @@
### 2.3.0 ISO image built on 2020/10/15
### Download and Verify
2.3.0 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.0.iso
MD5: E05B220E4FD7C054DF5C50906EE1375B
SHA1: 55E93C6EAB140AB4A0F07873CC871EBFDC699CD6
SHA256: 57B96A6E0951143E123BFC0CD0404F7466776E69F3C115F5A0444C0C6D5A6E32
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.0.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
For example, here are the steps you can use on most Linux distributions to download and verify our Security Onion ISO image.
Download and import the signing key:
```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -O - | gpg --import -
```
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.0.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.0.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.0.iso.sig securityonion-2.3.0.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Thu 15 Oct 2020 08:06:28 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```
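In addition to the GPG check above, you can also compare the downloaded ISO against the SHA256 checksum listed earlier (an optional extra check; `sha256sum` is assumed to be available, as it is on most Linux distributions):
```
sha256sum securityonion-2.3.0.iso
# Expected: 57b96a6e0951143e123bfc0cd0404f7466776e69f3c115f5a0444c0c6d5a6e32
```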
Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.3/installation.html


@@ -1 +1 @@
2.0.0-rc.1
2.3.1


@@ -13,6 +13,7 @@ role:
fleet:
heavynode:
helixsensor:
import:
manager:
managersearch:
standalone:


@@ -0,0 +1,14 @@
[Unit]
Description=The Salt Master Server
Documentation=man:salt-master(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target
[Service]
LimitNOFILE=100000
Type=notify
NotifyAccess=all
ExecStart=/usr/bin/salt-master
Restart=always
[Install]
WantedBy=multi-user.target


@@ -44,11 +44,11 @@ echo " guid: $GUID" >> $local_salt_dir/pillar/data/$TYPE.sls
echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
if [ $TYPE == 'sensorstab' ]; then
echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
salt-call state.apply grafana queue=True
fi
if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
if [ ! $10 ]; then
salt-call state.apply grafana queue=True
salt-call state.apply utility queue=True


@@ -1,11 +1,11 @@
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %}
{% set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:


@@ -0,0 +1,13 @@
elasticsearch:
templates:
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja
- so/so-import-template.json.jinja
- so/so-osquery-template.json.jinja
- so/so-ossec-template.json.jinja
- so/so-strelka-template.json.jinja
- so/so-syslog-template.json.jinja
- so/so-zeek-template.json.jinja


@@ -26,6 +26,7 @@ firewall:
- 4200
- 5601
- 6379
- 7788
- 8086
- 8090
- 9001
@@ -33,6 +34,8 @@ firewall:
- 9300
- 9400
- 9500
- 9595
- 9696
udp:
- 1514
minions:

11
pillar/logrotate/init.sls Normal file

@@ -0,0 +1,11 @@
logrotate:
conf: |
daily
rotate 14
missingok
copytruncate
compress
create
extension .log
dateext
dateyesterday


@@ -1,7 +1,6 @@
logstash:
docker_options:
port_bindings:
- 0.0.0.0:514:514
- 0.0.0.0:5044:5044
- 0.0.0.0:5644:5644
- 0.0.0.0:6050:6050


@@ -1,3 +1,4 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'redis') %}
logstash:
pipelines:
manager:
@@ -5,3 +6,4 @@ logstash:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
- so/9999_output_redis.conf.jinja


@@ -1,3 +1,4 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
logstash:
pipelines:
search:


@@ -1,8 +1,9 @@
base:
'*':
- patch.needs_restarting
- logrotate
'*_eval or *_helix or *_heavynode or *_sensor or *_standalone':
'*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import':
- match: compound
- zeek
@@ -13,22 +14,23 @@ base:
- logstash.search
- elasticsearch.search
'*_sensor':
- static
- zeeklogs
- healthcheck.sensor
- minions.{{ grains.id }}
'*_manager or *_managersearch':
- match: compound
- static
- data.*
- secrets
- minions.{{ grains.id }}
'*_manager':
- logstash
- logstash.manager
- elasticsearch.manager
'*_manager or *_managersearch':
- match: compound
- data.*
- secrets
- global
- minions.{{ grains.id }}
'*_sensor':
- zeeklogs
- healthcheck.sensor
- global
- minions.{{ grains.id }}
'*_eval':
- data.*
@@ -36,7 +38,7 @@ base:
- secrets
- healthcheck.eval
- elasticsearch.eval
- static
- global
- minions.{{ grains.id }}
'*_standalone':
@@ -48,35 +50,42 @@ base:
- zeeklogs
- secrets
- healthcheck.standalone
- static
- global
- minions.{{ grains.id }}
'*_node':
- static
- global
- minions.{{ grains.id }}
'*_heavynode':
- static
- zeeklogs
- global
- minions.{{ grains.id }}
'*_helix':
- static
- fireeye
- zeeklogs
- logstash
- logstash.helix
- global
- minions.{{ grains.id }}
'*_fleet':
- static
- data.*
- secrets
- global
- minions.{{ grains.id }}
'*_searchnode':
- static
- logstash
- logstash.search
- elasticsearch.search
- global
- minions.{{ grains.id }}
'*_import':
- zeeklogs
- secrets
- elasticsearch.eval
- global
- minions.{{ grains.id }}


@@ -52,4 +52,5 @@ zeek:
- frameworks/signatures/detect-windows-shells
redef:
- LogAscii::use_json = T;
- LogAscii::json_timestamps = JSON::TS_ISO8601;
- LogAscii::json_timestamps = JSON::TS_ISO8601;
- CaptureLoss::watch_interval = 5 mins;


@@ -1,42 +0,0 @@
zeeklogs:
enabled:
- conn
- dce_rpc
- dhcp
- dhcpv6
- dnp3
- dns
- dpd
- files
- ftp
- http
- intel
- irc
- kerberos
- modbus
- mqtt
- notice
- ntlm
- openvpn
- pe
- radius
- rfb
- rdp
- signatures
- sip
- smb_files
- smb_mapping
- smtp
- snmp
- software
- ssh
- ssl
- syslog
- telnet
- tunnel
- weird
- mysql
- socks
- x509
disabled:


@@ -2,6 +2,8 @@
import logging
import sys
from time import time
from os.path import getsize
allowed_functions = ['is_enabled', 'zeek']
states_to_apply = []
@@ -85,8 +87,21 @@ def zeek():
else:
zeek_restart = 0
__salt__['telegraf.send']('healthcheck zeek_restart=%i' % zeek_restart)
#__salt__['telegraf.send']('healthcheck zeek_restart=%i' % zeek_restart)
# write out to file in /nsm/zeek/logs/ for telegraf to read for zeek restart
try:
if getsize("/nsm/zeek/logs/zeek_restart.log") >= 1000000:
openmethod = "w"
else:
openmethod = "a"
except FileNotFoundError:
openmethod = "a"
influxtime = int(time() * 1000000000)
with open("/nsm/zeek/logs/zeek_restart.log", openmethod) as f:
f.write('healthcheck zeek_restart=%i %i\n' % (zeek_restart, influxtime))
if calling_func == 'execute' and zeek_restart:
apply_states()
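For reference, based on the format string above, each healthcheck run appends one InfluxDB line-protocol record to the log file for telegraf to pick up. A quick way to inspect it (illustrative only; the timestamp is in nanoseconds):
```
tail -n 1 /nsm/zeek/logs/zeek_restart.log
# healthcheck zeek_restart=0 1603129546000000000
```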

4
salt/_modules/so.py Normal file

@@ -0,0 +1,4 @@
#!py
def status():
return __salt__['cmd.run']('/usr/sbin/so-status')
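A custom execution module like this is normally synced to the minion and then called by module name; a minimal sketch based on standard Salt conventions (not a command shown in this commit):
```
# Sync custom modules from salt/_modules, then call the new function
sudo salt-call saltutil.sync_modules
sudo salt-call so.status
```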


@@ -0,0 +1,12 @@
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=2
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release

60
salt/airgap/init.sls Normal file

@@ -0,0 +1,60 @@
{% set MANAGER = salt['grains.get']('master') %}
airgapyum:
file.managed:
- name: /etc/yum/yum.conf
- source: salt://airgap/files/yum.conf
airgap_repo:
pkgrepo.managed:
- humanname: Airgap Repo
- baseurl: https://{{ MANAGER }}/repo
- gpgcheck: 0
- sslverify: 0
agbase:
file.absent:
- name: /etc/yum.repos.d/CentOS-Base.repo
agcr:
file.absent:
- name: /etc/yum.repos.d/CentOS-CR.repo
agdebug:
file.absent:
- name: /etc/yum.repos.d/CentOS-Debuginfo.repo
agfasttrack:
file.absent:
- name: /etc/yum.repos.d/CentOS-fasttrack.repo
agmedia:
file.absent:
- name: /etc/yum.repos.d/CentOS-Media.repo
agsources:
file.absent:
- name: /etc/yum.repos.d/CentOS-Sources.repo
agvault:
file.absent:
- name: /etc/yum.repos.d/CentOS-Vault.repo
agkernel:
file.absent:
- name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo
agepel:
file.absent:
- name: /etc/yum.repos.d/epel.repo
agtesting:
file.absent:
- name: /etc/yum.repos.d/epel-testing.repo
agssrepo:
file.absent:
- name: /etc/yum.repos.d/saltstack.repo
agwazrepo:
file.absent:
- name: /etc/yum.repos.d/wazuh.repo


@@ -1,3 +1,8 @@
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% if 'ca' in top_states %}
{% set manager = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
@@ -10,12 +15,16 @@
file.directory: []
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- bits: 4096
- passphrase:
- cipher: aes_256_cbc
- backup: True
x509.private_key_managed:
- name: /etc/pki/ca.key
- bits: 4096
- passphrase:
- cipher: aes_256_cbc
- backup: True
{% if salt['file.file_exists']('/etc/pki/ca.key') -%}
- prereq:
- x509: /etc/pki/ca.crt
{%- endif %}
/etc/pki/ca.crt:
x509.certificate_managed:
@@ -32,18 +41,15 @@ pki_private_key:
- days_valid: 3650
- days_remaining: 0
- backup: True
- managed_private_key:
name: /etc/pki/ca.key
bits: 4096
backup: True
- replace: False
- require:
- file: /etc/pki
send_x509_pem_entries_to_mine:
x509_pem_entries:
module.run:
- mine.send:
- func: x509.get_pem_entries
- glob_path: /etc/pki/ca.crt
- name: x509.get_pem_entries
- glob_path: /etc/pki/ca.crt
cakeyperms:
file.managed:
@@ -51,3 +57,11 @@ cakeyperms:
- name: /etc/pki/ca.key
- mode: 640
- group: 939
{% else %}
ca_state_not_allowed:
test.fail_without_changes:
- name: ca_state_not_allowed
{% endif %}


@@ -0,0 +1,2 @@
#!/bin/bash
logrotate -f /opt/so/conf/log-rotate.conf >/dev/null 2>&1


@@ -0,0 +1,2 @@
#!/bin/bash
/usr/sbin/logrotate -f /opt/so/conf/sensor-rotate.conf > /dev/null 2>&1


@@ -0,0 +1,79 @@
The following GUI tools are available on the analyst workstation:
chromium
url: https://www.chromium.org/Home
To run chromium, click Applications > Internet > Chromium Web Browser
Wireshark
url: https://www.wireshark.org/
To run Wireshark, click Applications > Internet > Wireshark Network Analyzer
NetworkMiner
url: https://www.netresec.com
To run NetworkMiner, click Applications > Internet > NetworkMiner
The following CLI tools are available on the analyst workstation:
bit-twist
url: http://bittwist.sourceforge.net
To run bit-twist, open a terminal and type: bittwist -h
chaosreader
url: http://chaosreader.sourceforge.net
To run chaosreader, open a terminal and type: chaosreader -h
dsniff
url: https://www.monkey.org/~dugsong/dsniff/
To run dsniff, open a terminal and type: dsniff -h
foremost
url: http://foremost.sourceforge.net
To run foremost, open a terminal and type: foremost -h
hping3
url: http://www.hping.org/hping3.html
To run hping3, open a terminal and type: hping3 -h
netsed
url: http://silicone.homelinux.org/projects/netsed/
To run netsed, open a terminal and type: netsed -h
ngrep
url: https://github.com/jpr5/ngrep
To run ngrep, open a terminal and type: ngrep -h
scapy
url: http://www.secdev.org/projects/scapy/
To run scapy, open a terminal and type: scapy
ssldump
url: http://www.rtfm.com/ssldump/
To run ssldump, open a terminal and type: ssldump -h
sslsplit
url: https://github.com/droe/sslsplit
To run sslsplit, open a terminal and type: sslsplit -h
tcpdump
url: http://www.tcpdump.org
To run tcpdump, open a terminal and type: tcpdump -h
tcpflow
url: https://github.com/simsong/tcpflow
To run tcpflow, open a terminal and type: tcpflow -h
tcpstat
url: https://frenchfries.net/paul/tcpstat/
To run tcpstat, open a terminal and type: tcpstat -h
tcptrace
url: http://www.tcptrace.org
To run tcptrace, open a terminal and type: tcptrace -h
tcpxtract
url: http://tcpxtract.sourceforge.net/
To run tcpxtract, open a terminal and type: tcpxtract -h
whois
url: http://www.linux.it/~md/software/
To run whois, open a terminal and type: whois -h

Binary image file added (269 KiB; preview not shown).


@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 87.86 105.22"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#1976d2;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><g id="Onion"><path id="Flesh" class="cls-1" d="M43.37,71.34a1.27,1.27,0,0,0,.44-.51,4.74,4.74,0,0,0,.61-2.39c-.12-6.79-.22-12.88-4-14.46-4.05-1.72-9.38,3.14-10.71,4.35a19.84,19.84,0,0,0-6.17,12.34c-.1,1-.76,9.34,5.46,15.41s15.45,6.06,21.72,3.53A22.25,22.25,0,0,0,61.88,79.16c5.31-10,1.61-20.31.85-22.3C57.78,44,43.35,36.11,29.88,36.78c-2.17.11-15.82,1-24.16,12.42A30.55,30.55,0,0,0,0,67.36c.15,16.14,13.38,29.51,26.23,34.7,12.61,5.1,24,2.76,28.78,1.65s17.12-4,25.53-15.08a34.47,34.47,0,0,0,7.24-18.46,34.79,34.79,0,0,0-3.42-17.32c-1.11-2.3-6.16-12.09-17-17C57,31.21,48.52,34.37,45.65,29.12a8.46,8.46,0,0,1-.41-6.21,1,1,0,0,0-1.05-1.28l-1.6,0a1.07,1.07,0,0,0-1,.8c-.66,2.51-1.12,6,.51,9.17C46,39.08,56.87,35.31,67.56,42.78c8.29,5.79,14.14,16.69,13.21,27.29a28.06,28.06,0,0,1-6,14.65c-7,9-17,11.29-21.82,12.38-4,.9-13.19,2.87-23.54-.93-2.65-1-20.33-8.29-22.38-25C5.72,60.55,13,48.9,24.21,44.93c13-4.6,27.26,2.75,32.09,13.26.58,1.25,4.85,10.93-.59,18.72-4.05,5.79-13.07,9.94-19.77,6A13.48,13.48,0,0,1,30,68.25c1.42-5,6.37-8.72,8.13-7.84s2.94,6.14,3,9.85A1.39,1.39,0,0,0,43.37,71.34Z"/><path id="Stem" class="cls-2" d="M30,27.14l-4.17,1.27a1.16,1.16,0,0,1-1.49-.93l-.11-.72a26.93,26.93,0,0,0-4.53-11.09A1.13,1.13,0,0,1,20.06,14l1.06-.63a1.15,1.15,0,0,1,1.52.32c.41.58.82,1.17,1.23,1.78l1.48,2.2C28.42,7.27,37.14.12,46.21,0,58.09-.16,65.59,10.67,68,17.63a23.37,23.37,0,0,1,.94,3.64.91.91,0,0,1-1.14,1l-2.66-.73a1.47,1.47,0,0,1-1-1.08,19.71,19.71,0,0,0-1.9-4.8c-3-5.44-9.67-11.21-16.55-10.59-7.74.7-15.22,9.46-14.85,20.91A1.14,1.14,0,0,1,30,27.14Z"/></g></g></g></svg>



@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 87.86 105.22"><defs><style>.cls-1{fill:#1976d2;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Layer_1-2" data-name="Layer 1"><g id="Onion"><path id="Flesh" d="M43.37,71.34a1.27,1.27,0,0,0,.44-.51,4.74,4.74,0,0,0,.61-2.39c-.12-6.79-.22-12.88-4-14.46-4.05-1.72-9.38,3.14-10.71,4.35a19.84,19.84,0,0,0-6.17,12.34c-.1,1-.76,9.34,5.46,15.41s15.45,6.06,21.72,3.53A22.25,22.25,0,0,0,61.88,79.16c5.31-10,1.61-20.31.85-22.3C57.78,44,43.35,36.11,29.88,36.78c-2.17.11-15.82,1-24.16,12.42A30.55,30.55,0,0,0,0,67.36c.15,16.14,13.38,29.51,26.23,34.7,12.61,5.1,24,2.76,28.78,1.65s17.12-4,25.53-15.08a34.47,34.47,0,0,0,7.24-18.46,34.79,34.79,0,0,0-3.42-17.32c-1.11-2.3-6.16-12.09-17-17C57,31.21,48.52,34.37,45.65,29.12a8.46,8.46,0,0,1-.41-6.21,1,1,0,0,0-1.05-1.28l-1.6,0a1.07,1.07,0,0,0-1,.8c-.66,2.51-1.12,6,.51,9.17C46,39.08,56.87,35.31,67.56,42.78c8.29,5.79,14.14,16.69,13.21,27.29a28.06,28.06,0,0,1-6,14.65c-7,9-17,11.29-21.82,12.38-4,.9-13.19,2.87-23.54-.93-2.65-1-20.33-8.29-22.38-25C5.72,60.55,13,48.9,24.21,44.93c13-4.6,27.26,2.75,32.09,13.26.58,1.25,4.85,10.93-.59,18.72-4.05,5.79-13.07,9.94-19.77,6A13.48,13.48,0,0,1,30,68.25c1.42-5,6.37-8.72,8.13-7.84s2.94,6.14,3,9.85A1.39,1.39,0,0,0,43.37,71.34Z"/><path id="Stem" class="cls-1" d="M30,27.14l-4.17,1.27a1.16,1.16,0,0,1-1.49-.93l-.11-.72a26.93,26.93,0,0,0-4.53-11.09A1.13,1.13,0,0,1,20.06,14l1.06-.63a1.15,1.15,0,0,1,1.52.32c.41.58.82,1.17,1.23,1.78l1.48,2.2C28.42,7.27,37.14.12,46.21,0,58.09-.16,65.59,10.67,68,17.63a23.37,23.37,0,0,1,.94,3.64.91.91,0,0,1-1.14,1l-2.66-.73a1.47,1.47,0,0,1-1-1.08,19.71,19.71,0,0,0-1.9-4.8c-3-5.44-9.67-11.21-16.55-10.59-7.74.7-15.22,9.46-14.85,20.91A1.14,1.14,0,0,1,30,27.14Z"/></g></g></g></svg>


Binary image file added (319 KiB; preview not shown).


@@ -0,0 +1,23 @@
{%- set logrotate_conf = salt['pillar.get']('logrotate:conf') %}
/opt/so/log/aptcacher-ng/*.log
/opt/so/log/idstools/*.log
/opt/so/log/nginx/*.log
/opt/so/log/soc/*.log
/opt/so/log/kratos/*.log
/opt/so/log/kibana/*.log
/opt/so/log/influxdb/*.log
/opt/so/log/elastalert/*.log
/opt/so/log/soctopus/*.log
/opt/so/log/curator/*.log
/opt/so/log/fleet/*.log
/opt/so/log/suricata/*.log
/opt/so/log/mysql/*.log
/opt/so/log/playbook/*.log
/opt/so/log/logstash/*.log
/opt/so/log/filebeat/*.log
/opt/so/log/telegraf/*.log
/opt/so/log/redis/*.log
{
{{ logrotate_conf | indent(width=4) }}
}
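To preview what the rendered configuration would do without rotating anything, logrotate's debug (dry-run) mode can be pointed at the deployed file (a sketch; the template is deployed to /opt/so/conf/log-rotate.conf by the common state later in this commit):
```
sudo logrotate -d /opt/so/conf/log-rotate.conf
```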


@@ -0,0 +1,10 @@
/opt/so/log/sensor_clean.log
{
daily
rotate 2
missingok
nocompress
create
sharedscripts
endscript
}


@@ -1,5 +1,15 @@
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% if 'common' in top_states %}
{% set role = grains.id.split('_') | last %}
# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
file.absent:
- name: /tmp/variables.txt
# Add socore Group
socoregroup:
group.present:
@@ -46,6 +56,12 @@ salttmp:
# Install epel
{% if grains['os'] == 'CentOS' %}
repair_yumdb:
cmd.run:
- name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
- onlyif:
- 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
epel:
pkg.installed:
- skip_suggestions: True
@@ -83,7 +99,7 @@ heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-2
- docker-ce: 5:19.03.9~3-0~ubuntu-bionic
- docker-ce: 5:19.03.12~3-0~ubuntu-bionic
- hold: True
- update_holds: True
@@ -119,7 +135,7 @@ heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-3.2.el7
- docker-ce: 3:19.03.11-3.el7
- docker-ce: 3:19.03.12-3.el7
- hold: True
- update_holds: True
{% endif %}
@@ -158,4 +174,73 @@ utilsyncscripts:
- daymonth: '*'
- month: '*'
- dayweek: '*'
sensorrotatescript:
file.managed:
- name: /usr/local/bin/sensor-rotate
- source: salt://common/cron/sensor-rotate
- mode: 755
sensorrotateconf:
file.managed:
- name: /opt/so/conf/sensor-rotate.conf
- source: salt://common/files/sensor-rotate.conf
- mode: 644
/usr/local/bin/sensor-rotate:
cron.present:
- user: root
- minute: '1'
- hour: '0'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
commonlogrotatescript:
file.managed:
- name: /usr/local/bin/common-rotate
- source: salt://common/cron/common-rotate
- mode: 755
commonlogrotateconf:
file.managed:
- name: /opt/so/conf/log-rotate.conf
- source: salt://common/files/log-rotate.conf
- template: jinja
- mode: 644
/usr/local/bin/common-rotate:
cron.present:
- user: root
- minute: '1'
- hour: '0'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
# Add config backup
/usr/sbin/so-config-backup > /dev/null 2>&1:
cron.present:
- user: root
- minute: '1'
- hour: '0'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
# Make sure Docker is always running
docker:
service.running:
- enable: True
{% else %}
common_state_not_allowed:
test.fail_without_changes:
- name: common_state_not_allowed
{% endif %}


@@ -0,0 +1,10 @@
{% set docker = {
'containers': [
'so-filebeat',
'so-nginx',
'so-soc',
'so-kratos',
'so-elasticsearch',
'so-kibana'
]
} %}


@@ -2,4 +2,4 @@
'containers': [
'so-zeek'
]
} %}
} %}


@@ -5,6 +5,9 @@
# to the list predefined by the role / minion id affix
{% macro append_containers(pillar_name, k, compare )%}
{% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %}
{% if k == 'enabled' %}
{% set k = pillar_name %}
{% endif %}
{% from 'common/maps/'~k~'.map.jinja' import docker as d with context %}
{% for li in d['containers'] %}
{{ docker['containers'].append(li) }}
@@ -20,8 +23,8 @@
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
{{ append_containers('manager', 'grafana', 0) }}
{{ append_containers('static', 'fleet_manager', 0) }}
{{ append_containers('manager', 'wazuh', 0) }}
{{ append_containers('global', 'fleet_manager', 0) }}
{{ append_containers('global', 'wazuh', 0) }}
{{ append_containers('manager', 'thehive', 0) }}
{{ append_containers('manager', 'playbook', 0) }}
{{ append_containers('manager', 'freq', 0) }}
@@ -29,11 +32,11 @@
{% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
{{ append_containers('static', 'strelka', 0) }}
{{ append_containers('strelka', 'enabled', 0) }}
{% endif %}
{% if role in ['heavynode', 'standalone'] %}
{{ append_containers('static', 'zeekversion', 'SURICATA') }}
{{ append_containers('global', 'mdengine', 'SURICATA') }}
{% endif %}
{% if role == 'searchnode' %}
@@ -41,5 +44,5 @@
{% endif %}
{% if role == 'sensor' %}
{{ append_containers('static', 'zeekversion', 'SURICATA') }}
{{ append_containers('global', 'mdengine', 'SURICATA') }}
{% endif %}


@@ -21,6 +21,30 @@ local_salt_dir=/opt/so/saltstack/local
SKIP=0
function usage {
cat << EOF
Usage: $0 [-abefhoprsw] [ -i IP ]
This program allows you to add a firewall rule to allow connections from a new IP address or CIDR range.
If you run this program with no arguments, it will present a menu for you to choose your options.
If you want to automate and skip the menu, you can pass the desired options as command line arguments.
EXAMPLES
To add 10.1.2.3 to the analyst role:
so-allow -a -i 10.1.2.3
To add 10.1.2.0/24 to the osquery role:
so-allow -o -i 10.1.2.0/24
EOF
}
while getopts "ahfesprbowi:" OPTION
do
case $OPTION in
@@ -36,7 +60,7 @@ do
FULLROLE="beats_endpoint"
SKIP=1
;;
e)
e)
FULLROLE="elasticsearch_rest"
SKIP=1
;;
@@ -127,7 +151,7 @@ salt-call state.apply firewall queue=True
if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
# If analyst, add to Wazuh AR whitelist
if [ "$FULLROLE" == "analyst" ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf"
if ! grep -q "<white_list>$IP</white_list>" $WAZUH_MGR_CFG ; then
DATE=$(date)
sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG


@@ -0,0 +1,23 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst


@@ -0,0 +1,309 @@
#!/bin/bash
# Copyright 2014-2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
INSTALL_LOG=/root/so-analyst-install.log
exec &> >(tee -a "$INSTALL_LOG")
log() {
msg=$1
level=${2:-I}
now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ")
echo -e "$now | $level | $msg" >> "$INSTALL_LOG" 2>&1
}
error() {
log "$1" "E"
}
info() {
log "$1" "I"
}
title() {
echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$INSTALL_LOG" 2>&1
}
logCmd() {
cmd=$1
info "Executing command: $cmd"
$cmd >> "$INSTALL_LOG" 2>&1
}
analyze_system() {
title "System Characteristics"
logCmd "uptime"
logCmd "uname -a"
logCmd "free -h"
logCmd "lscpu"
logCmd "df -h"
logCmd "ip a"
}
analyze_system
OS=$(grep PRETTY_NAME /etc/os-release | grep 'CentOS Linux 7')
if [ $? -ne 0 ]; then
echo "This is an unsupported OS. Please use CentOS 7 to install the analyst node."
exit 1
fi
if [[ "$manufacturer" == "Security Onion Solutions" && "$family" == "Automated" ]]; then
INSTALL=yes
CURLCONTINUE=no
else
INSTALL=''
CURLCONTINUE=''
fi
FIRSTPASS=yes
while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do
if [[ "$FIRSTPASS" == "yes" ]]; then
clear
echo "###########################################"
echo "## ** W A R N I N G ** ##"
echo "## _______________________________ ##"
echo "## ##"
echo "## Installing the Security Onion ##"
echo "## analyst node on this device will ##"
echo "## make permanenet changes to ##"
echo "## the system. ##"
echo "## ##"
echo "###########################################"
echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)"
FIRSTPASS=no
else
echo "Please type 'yes' to continue or 'no' to exit."
fi
read INSTALL
done
if [[ $INSTALL == "no" ]]; then
echo "Exiting analyst node installation."
exit 0
fi
echo "Testing for internet connection with curl https://securityonionsolutions.com/"
CANCURL=$(curl -sI https://securityonionsolutions.com/ | grep "200 OK")
if [ $? -ne 0 ]; then
FIRSTPASS=yes
while [[ $CURLCONTINUE != "yes" ]] && [[ $CURLCONTINUE != "no" ]]; do
if [[ "$FIRSTPASS" == "yes" ]]; then
echo "We could not access https://securityonionsolutions.com/."
echo "Since packages are downloaded from the internet, internet acceess is required."
echo "If you would like to ignore this warning and continue anyway, please type 'yes'."
echo "Otherwise, type 'no' to exit."
FIRSTPASS=no
else
echo "Please type 'yes' to continue or 'no' to exit."
fi
read CURLCONTINUE
done
if [[ "$CURLCONTINUE" == "no" ]]; then
echo "Exiting analyst node installation."
exit 0
fi
else
echo "We were able to curl https://securityonionsolutions.com/."
sleep 3
fi
# Install a GUI text editor
yum -y install gedit
# Install misc utils
yum -y install wget curl unzip epel-release yum-plugin-versionlock;
# Install xWindows
yum -y groupinstall "X Window System";
yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts;
unlink /etc/systemd/system/default.target;
ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target;
yum -y install file-roller
# Install Mono - prereq for NetworkMiner
yum -y install mono-core mono-basic mono-winforms expect
# Install NetworkMiner
yum -y install libcanberra-gtk2;
wget https://www.netresec.com/?download=NetworkMiner -O /tmp/nm.zip;
mkdir -p /opt/networkminer/
unzip /tmp/nm.zip -d /opt/networkminer/;
rm /tmp/nm.zip;
mv /opt/networkminer/NetworkMiner_*/* /opt/networkminer/
chmod +x /opt/networkminer/NetworkMiner.exe;
chmod -R go+w /opt/networkminer/AssembledFiles/;
chmod -R go+w /opt/networkminer/Captures/;
# Create networkminer shim
cat << EOF >> /bin/networkminer
#!/bin/bash
/bin/mono /opt/networkminer/NetworkMiner.exe --noupdatecheck "\$@"
EOF
chmod +x /bin/networkminer
# Convert networkminer ico file to png format
yum -y install ImageMagick
convert /opt/networkminer/networkminericon.ico /opt/networkminer/networkminericon.png
# Create menu entry
cat << EOF >> /usr/share/applications/networkminer.desktop
[Desktop Entry]
Name=NetworkMiner
Comment=NetworkMiner
Encoding=UTF-8
Exec=/bin/networkminer %f
Icon=/opt/networkminer/networkminericon-4.png
StartupNotify=true
Terminal=false
X-MultipleArgs=false
Type=Application
MimeType=application/x-pcap;
Categories=Network;
EOF
# Set default monospace font to Liberation
cat << EOF >> /etc/fonts/local.conf
<match target="pattern">
<test name="family" qual="any">
<string>monospace</string>
</test>
<edit binding="strong" mode="prepend" name="family">
<string>Liberation Mono</string>
</edit>
</match>
EOF
# Install Wireshark for Gnome
yum -y install wireshark-gnome;
# Install dsniff
yum -y install dsniff;
# Install hping3
yum -y install hping3;
# Install netsed
yum -y install netsed;
# Install ngrep
yum -y install ngrep;
# Install scapy
yum -y install python36-scapy;
# Install ssldump
yum -y install ssldump;
# Install tcpdump
yum -y install tcpdump;
# Install tcpflow
yum -y install tcpflow;
# Install tcpxtract
yum -y install tcpxtract;
# Install whois
yum -y install whois;
# Install foremost
yum -y install https://forensics.cert.org/centos/cert/7/x86_64//foremost-1.5.7-13.1.el7.x86_64.rpm;
# Install chromium
yum -y install chromium;
# Install tcpstat
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcpstat-1.5.0/securityonion-tcpstat-1.5.0.rpm;
# Install tcptrace
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcptrace-6.6.7/securityonion-tcptrace-6.6.7.rpm;
# Install sslsplit
yum -y install libevent;
yum -y install sslsplit;
# Install Bit-Twist
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-bittwist-2.0.0/securityonion-bittwist-2.0.0.rpm;
# Install chaosreader
yum -y install perl-IO-Compress perl-Net-DNS;
yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-chaosreader-0.95.10/securityonion-chaosreader-0.95.10.rpm;
chmod +x /bin/chaosreader;
if [ -f ../../files/analyst/README ]; then
cp ../../files/analyst/README /;
cp ../../files/analyst/so-wallpaper.jpg /usr/share/backgrounds/;
cp ../../files/analyst/so-lockscreen.jpg /usr/share/backgrounds/;
cp ../../files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/;
else
cp /opt/so/saltstack/default/salt/common/files/analyst/README /;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-wallpaper.jpg /usr/share/backgrounds/;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-lockscreen.jpg /usr/share/backgrounds/;
cp /opt/so/saltstack/default/salt/common/files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/;
fi
# Set background wallpaper
cat << EOF >> /etc/dconf/db/local.d/00-background
# Specify the dconf path
[org/gnome/desktop/background]
# Specify the path to the desktop background image file
picture-uri='file:///usr/share/backgrounds/so-wallpaper.jpg'
# Specify one of the rendering options for the background image:
# 'none', 'wallpaper', 'centered', 'scaled', 'stretched', 'zoom', 'spanned'
picture-options='zoom'
# Specify the left or top color when drawing gradients or the solid color
primary-color='000000'
# Specify the right or bottom color when drawing gradients
secondary-color='FFFFFF'
EOF
# Set lock screen
cat << EOF >> /etc/dconf/db/local.d/00-screensaver
[org/gnome/desktop/session]
idle-delay=uint32 180
[org/gnome/desktop/screensaver]
lock-enabled=true
lock-delay=uint32 120
picture-options='zoom'
picture-uri='file:///usr/share/backgrounds/so-lockscreen.jpg'
EOF
cat << EOF >> /etc/dconf/db/local.d/locks/screensaver
/org/gnome/desktop/session/idle-delay
/org/gnome/desktop/screensaver/lock-enabled
/org/gnome/desktop/screensaver/lock-delay
EOF
# Do not show the user list at login screen
cat << EOF >> /etc/dconf/db/local.d/00-login-screen
[org/gnome/login-screen]
logo='/usr/share/pixmaps/so-login-logo-dark.svg'
disable-user-list=true
EOF
dconf update;
echo
echo "Analyst workstation has been installed!"
echo "Press ENTER to reboot or Ctrl-C to cancel."
read pause
reboot;


@@ -19,14 +19,35 @@ IMAGEREPO=securityonion
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
echo "This script must be run using sudo!"
exit 1
fi
# Define a banner to separate sections
banner="========================================================================="
header() {
echo
printf '%s\n' "$banner" "$*" "$banner"
echo
printf '%s\n' "$banner" "$*" "$banner"
}
lookup_pillar() {
key=$1
salt-call --no-color pillar.get global:${key} --out=newline_values_only
}
lookup_pillar_secret() {
key=$1
salt-call --no-color pillar.get secrets:${key} --out=newline_values_only
}
check_container() {
docker ps | grep "$1:" > /dev/null 2>&1
return $?
}
check_password() {
local password=$1
echo "$password" | egrep -v "'|\"|\\\\" > /dev/null 2>&1
return $?
}
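For context, these helpers are sourced by the other so-* scripts in this commit; a minimal usage sketch (the values are illustrative, not output from a real deployment):
```
# Source the shared helpers, then read a pillar value and check a container
. /usr/sbin/so-common
MANAGERIP=$(lookup_pillar managerip)
if check_container so-elasticsearch; then
  echo "Elasticsearch container is running on $MANAGERIP"
fi
```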


@@ -0,0 +1,44 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
{% set BACKUPLOCATIONS = salt['pillar.get']('backup:locations', {}) %}
TODAY=$(date '+%Y_%m_%d')
BACKUPFILE="/nsm/backup/so-config-backup-$TODAY.tar"
MAXBACKUPS=7
# Create backup dir if it does not exist
mkdir -p /nsm/backup
# If we haven't already written a backup file for today, let's do so
if [ ! -f $BACKUPFILE ]; then
# Create empty backup file
tar -cf $BACKUPFILE -T /dev/null
# Loop through all paths defined in global.sls, and append them to backup file
{%- for LOCATION in BACKUPLOCATIONS %}
tar -rf $BACKUPFILE {{ LOCATION }}
{%- endfor %}
fi
# Find oldest backup file and remove it
NUMBACKUPS=$(find /nsm/backup/ -type f -name "so-config-backup*" | wc -l)
OLDESTBACKUP=$(find /nsm/backup/ -type f -name "so-config-backup*" | ls -1t | tail -1)
if [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; then
rm -f /nsm/backup/$OLDESTBACKUP
fi
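Given the date format above, backups accumulate as one tar file per day and the oldest is pruned once MAXBACKUPS is exceeded; an illustrative listing (not captured output):
```
ls -1 /nsm/backup/
# so-config-backup-2020_10_13.tar
# ...
# so-config-backup-2020_10_19.tar
```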


@@ -0,0 +1,54 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <new-user-name>"
echo ""
echo "Adds a new user to Cortex. The new password will be read from STDIN."
exit 1
}
if [ $# -ne 1 ]; then
usage
fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_IP=$(lookup_pillar managerip)
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
CORTEX_USER=$USER
# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs CORTEX_PASS
# Create new user in Cortex
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to Cortex."
else
echo "Unable to add user to Cortex; user might already exist."
echo $resp
exit 2
fi
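Because the password is read from standard input, the script can be run interactively or driven from a pipe; a hypothetical invocation (the installed script name is not shown in this view, so `so-cortex-user-add` below is a placeholder):
```
# Interactive: you will be prompted for the new password
sudo so-cortex-user-add jdoe
# Non-interactive: supply the password on stdin (placeholder script name and credentials)
echo 'S3cretPassw0rd' | sudo so-cortex-user-add jdoe
```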


@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <user-name> <true|false>"
echo ""
echo "Enables or disables a user in Cortex."
exit 1
}
if [ $# -ne 2 ]; then
usage
fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_IP=$(lookup_pillar managerip)
CORTEX_USER=$USER
case "${2^^}" in
FALSE | NO | 0)
CORTEX_STATUS=Locked
;;
TRUE | YES | 1)
CORTEX_STATUS=Ok
;;
*)
usage
;;
esac
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully updated user in Cortex."
else
echo "Failed to update user in Cortex."
echo $resp
exit 2
fi


@@ -64,7 +64,7 @@ if [ $MANAGERCHECK != 'so-helix' ]; then
"so-thehive-cortex:$VERSION" \
"so-curator:$VERSION" \
"so-domainstats:$VERSION" \
"so-elastalert$VERSION" \
"so-elastalert:$VERSION" \
"so-elasticsearch:$VERSION" \
"so-filebeat:$VERSION" \
"so-fleet:$VERSION" \
@@ -76,6 +76,7 @@ if [ $MANAGERCHECK != 'so-helix' ]; then
"so-kibana:$VERSION" \
"so-kratos:$VERSION" \
"so-logstash:$VERSION" \
"so-minio:$VERSION" \
"so-mysql:$VERSION" \
"so-nginx:$VERSION" \
"so-pcaptools:$VERSION" \
@@ -84,7 +85,10 @@ if [ $MANAGERCHECK != 'so-helix' ]; then
"so-soc:$VERSION" \
"so-soctopus:$VERSION" \
"so-steno:$VERSION" \
"so-strelka:$VERSION" \
"so-strelka-frontend:$VERSION" \
"so-strelka-manager:$VERSION" \
"so-strelka-backend:$VERSION" \
"so-strelka-filestream:$VERSION" \
"so-suricata:$VERSION" \
"so-telegraf:$VERSION" \
"so-thehive:$VERSION" \


@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
SKIP=0
@@ -50,7 +50,11 @@ done
if [ $SKIP -ne 1 ]; then
# List indices
echo
curl {{ MANAGERIP }}:9200/_cat/indices?v
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -k https://{{ NODEIP }}:9200/_cat/indices?v
{% else %}
curl {{ NODEIP }}:9200/_cat/indices?v
{% endif %}
echo
# Inform user we are about to delete all data
echo
@@ -89,10 +93,18 @@ fi
# Delete data
echo "Deleting data..."
INDXS=$(curl -s -XGET {{ MANAGERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
{% if grains['role'] in ['so-node','so-heavynode'] %}
INDXS=$(curl -s -XGET -k https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
{% else %}
INDXS=$(curl -s -XGET {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
{% endif %}
for INDX in ${INDXS}
do
curl -XDELETE "{{ MANAGERIP }}:9200/${INDX}" > /dev/null 2>&1
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -XDELETE -k https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
{% else %}
curl -XDELETE "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
{% endif %}
done
#Start Logstash/Filebeat


@@ -0,0 +1,33 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
{% else %}
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
{% else %}
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
{% endif %}
fi


@@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
{% else %}
curl -s {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
{% else %}
curl -s {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
{% endif %}
fi

View File

@@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_template/* | jq 'keys'
{% else %}
curl -s {{ NODEIP }}:9200/_template/* | jq 'keys'
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_template/$1 | jq
{% else %}
curl -s {{ NODEIP }}:9200/_template/$1 | jq
{% endif %}
fi

View File

@@ -1,4 +1,6 @@
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{%- set mainint = salt['pillar.get']('host:mainint') %}
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
#
@@ -16,7 +18,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
default_conf_dir=/opt/so/conf
ELASTICSEARCH_HOST="{{ MANAGERIP}}"
ELASTICSEARCH_HOST="{{ MYIP }}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""
@@ -28,7 +30,11 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% else %}
curl --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% endif %}
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -49,7 +55,11 @@ cd ${ELASTICSEARCH_TEMPLATES}
echo "Loading templates..."
{% if grains['role'] in ['so-node','so-heavynode'] %}
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
{% else %}
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
{% endif %}
echo
cd - >/dev/null

View File

@@ -17,6 +17,28 @@
. /usr/sbin/so-common
local_salt_dir=/opt/so/saltstack/local
cat << EOF
This program will switch from the open source version of the Elastic Stack to the Features version licensed under the Elastic license.
If you proceed, we will download new Docker images and restart services.
Please review the Elastic license:
https://raw.githubusercontent.com/elastic/elasticsearch/master/licenses/ELASTIC-LICENSE.txt
Please also note that, if you have a distributed deployment and continue with this change, Elastic traffic between nodes will change from encrypted to cleartext!
(We expect to support Elastic Features Security at some point in the future.)
Do you agree to the terms of the Elastic license and understand the note about encryption?
If so, type AGREE to accept the Elastic license and continue. Otherwise, just press Enter to exit this program without making any changes.
EOF
read INPUT
if [ "$INPUT" != "AGREE" ]; then
exit
fi
echo "Please wait while switching to Elastic Features."
manager_check() {
# Check to see if this is a manager
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
@@ -29,9 +51,9 @@ manager_check() {
}
manager_check
VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
# Modify static.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls
VERSION=$(grep soversion $local_salt_dir/pillar/global.sls | cut -d':' -f2|sed 's/ //g')
# Modify global.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls
SUFFIX="-features"
TRUSTED_CONTAINERS=( \
"so-elasticsearch:$VERSION$SUFFIX" \

View File

@@ -0,0 +1,64 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <new-user-name>"
echo ""
echo "Adds a new user to Fleet. The new password will be read from STDIN."
exit 1
}
if [ $# -ne 1 ]; then
usage
fi
USER=$1
MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER
# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs FLEET_PASS
if ! check_password "$FLEET_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
exit 2
fi
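# Generate a bcrypt hash of the new password inside the so-soctopus container (assumed to ship the Python bcrypt module), since Fleet stores bcrypt hashes in its MySQL users table.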
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
if [[ $? -ne 0 ]]; then
echo "Failed to generate Fleet password hash"
exit 2
fi
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
"INSERT INTO users (password,salt,username,email,admin,enabled) VALUES ('$FLEET_HASH','','$FLEET_USER','$FLEET_USER',1,1)" 2>&1)
if [[ $? -eq 0 ]]; then
echo "Successfully added user to Fleet"
else
echo "Unable to add user to Fleet; user might already exist"
echo "$MYSQL_OUTPUT"
exit 2
fi

View File

@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <user-name>"
echo ""
echo "Enables or disables a user in Fleet"
exit 1
}
if [ $# -ne 2 ]; then
usage
fi
USER=$1
MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER
case "${2^^}" in
FALSE | NO | 0)
FLEET_STATUS=0
;;
TRUE | YES | 1)
FLEET_STATUS=1
;;
*)
usage
;;
esac
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
"UPDATE users SET enabled=$FLEET_STATUS WHERE username='$FLEET_USER'" 2>&1)
if [[ $? -eq 0 ]]; then
echo "Successfully updated user in Fleet"
else
echo "Failed to update user in Fleet"
echo "$MYSQL_OUTPUT"
exit 2
fi

View File

@@ -15,10 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
. /usr/sbin/so-common
function usage {
cat << EOF
@@ -32,13 +35,13 @@ EOF
function pcapinfo() {
PCAP=$1
ARGS=$2
docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
}
function pcapfix() {
PCAP=$1
PCAP_OUT=$2
docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
docker run --rm -v "$PCAP:/input.pcap" -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
}
function suricata() {
@@ -57,7 +60,7 @@ function suricata() {
-v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
-v ${LOG_PATH}:/var/log/suricata/:rw \
-v ${NSM_PATH}/:/nsm/:rw \
-v $PCAP:/input.pcap:ro \
-v "$PCAP:/input.pcap:ro" \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
@@ -76,7 +79,7 @@ function zeek() {
-v $NSM_PATH/logs:/nsm/zeek/logs:rw \
-v $NSM_PATH/spool:/nsm/zeek/spool:rw \
-v $NSM_PATH/extracted:/nsm/zeek/extracted:rw \
-v $PCAP:/input.pcap:ro \
-v "$PCAP:/input.pcap:ro" \
-v /opt/so/conf/zeek/local.zeek:/opt/zeek/share/zeek/site/local.zeek:ro \
-v /opt/so/conf/zeek/node.cfg:/opt/zeek/etc/node.cfg:ro \
-v /opt/so/conf/zeek/zeekctl.cfg:/opt/zeek/etc/zeekctl.cfg:ro \
@@ -210,9 +213,9 @@ cat << EOF
Import complete!
You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
https://{{ MANAGERIP }}/#/hunt?q=%2a%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM
https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC
or you can manually set your Time Range to be:
or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST To: $END_NEWEST
Please note that it may take 30 seconds or more for events to appear in Onion Hunt.

View File

@@ -15,4 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
curl -X GET "localhost:9200/_cat/indices?v"
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -X GET -k https://localhost:9200/_cat/indices?v
{% else %}
curl -X GET localhost:9200/_cat/indices?v
{% endif %}

View File

@@ -1,9 +1,9 @@
#!/bin/bash
#
# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', '') %}
# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %}
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#

View File

@@ -17,4 +17,4 @@
. /usr/sbin/so-common
docker exec so-soctopus python3 playbook_play-sync.py >> /opt/so/log/soctopus/so-playbook-sync.log 2>&1
docker exec so-soctopus python3 playbook_play-sync.py

View File

@@ -19,18 +19,22 @@
. /usr/sbin/so-common
echo $banner
printf "Restarting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
echo $banner
if [ $# -ge 1 ]; then
if [ "$2" = "--force" ]
then
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
salt-call saltutil.kill_all_jobs
echo $banner
printf "Restarting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
echo $banner
if [ "$2" = "--force" ]; then
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
salt-call saltutil.kill_all_jobs
fi
case $1 in
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
esac
else
echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart filebeat, or so-filebeat-restart\n"
fi
case $1 in
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
esac

View File

@@ -10,4 +10,4 @@ got_root() {
}
got_root
docker exec -it so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat'
docker exec so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat'

View File

@@ -23,99 +23,104 @@ CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
LOG="/opt/so/log/sensor_clean.log"
TODAY=$(date -u "+%Y-%m-%d")
clean () {
## find the oldest Zeek logs directory
OLDEST_DIR=$(ls /nsm/zeek/logs/ | grep -v "current" | grep -v "stats" | grep -v "packetloss" | grep -v "zeek_clean" | sort | head -n 1)
if [ -z "$OLDEST_DIR" -o "$OLDEST_DIR" == ".." -o "$OLDEST_DIR" == "." ]
then
echo "$(date) - No old Zeek logs available to clean up in /nsm/zeek/logs/" >> $LOG
#exit 0
else
echo "$(date) - Removing directory: /nsm/zeek/logs/$OLDEST_DIR" >> $LOG
rm -rf /nsm/zeek/logs/"$OLDEST_DIR"
fi
clean() {
## find the oldest Zeek logs directory
OLDEST_DIR=$(ls /nsm/zeek/logs/ | grep -v "current" | grep -v "stats" | grep -v "packetloss" | grep -v "zeek_clean" | sort | head -n 1)
if [ -z "$OLDEST_DIR" -o "$OLDEST_DIR" == ".." -o "$OLDEST_DIR" == "." ]; then
echo "$(date) - No old Zeek logs available to clean up in /nsm/zeek/logs/" >>$LOG
#exit 0
else
echo "$(date) - Removing directory: /nsm/zeek/logs/$OLDEST_DIR" >>$LOG
rm -rf /nsm/zeek/logs/"$OLDEST_DIR"
fi
## Remarking for now, as we are moving extracted files to /nsm/strelka/processed
## find oldest files in extracted directory and exclude today
#OLDEST_EXTRACT=$(find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' 2>/dev/null | sort | grep -v $TODAY | head -n 1)
#if [ -z "$OLDEST_EXTRACT" -o "$OLDEST_EXTRACT" == ".." -o "$OLDEST_EXTRACT" == "." ]
#then
# echo "$(date) - No old extracted files available to clean up in /nsm/zeek/extracted/complete" >> $LOG
#else
# OLDEST_EXTRACT_DATE=`echo $OLDEST_EXTRACT | awk '{print $1}' | cut -d+ -f1`
# OLDEST_EXTRACT_FILE=`echo $OLDEST_EXTRACT | awk '{print $2}'`
# echo "$(date) - Removing extracted files for $OLDEST_EXTRACT_DATE" >> $LOG
# find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' | grep $OLDEST_EXTRACT_DATE | awk '{print $2}' |while read FILE
# do
# echo "$(date) - Removing extracted file: $FILE" >> $LOG
# rm -f "$FILE"
# done
#fi
## Remarking for now, as we are moving extracted files to /nsm/strelka/processed
## find oldest files in extracted directory and exclude today
#OLDEST_EXTRACT=$(find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' 2>/dev/null | sort | grep -v $TODAY | head -n 1)
#if [ -z "$OLDEST_EXTRACT" -o "$OLDEST_EXTRACT" == ".." -o "$OLDEST_EXTRACT" == "." ]
#then
# echo "$(date) - No old extracted files available to clean up in /nsm/zeek/extracted/complete" >> $LOG
#else
# OLDEST_EXTRACT_DATE=`echo $OLDEST_EXTRACT | awk '{print $1}' | cut -d+ -f1`
# OLDEST_EXTRACT_FILE=`echo $OLDEST_EXTRACT | awk '{print $2}'`
# echo "$(date) - Removing extracted files for $OLDEST_EXTRACT_DATE" >> $LOG
# find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' | grep $OLDEST_EXTRACT_DATE | awk '{print $2}' |while read FILE
# do
# echo "$(date) - Removing extracted file: $FILE" >> $LOG
# rm -f "$FILE"
# done
#fi
## Clean up Zeek extracted files processed by Strelka
STRELKA_FILES='/nsm/strelka/processed'
OLDEST_STRELKA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
if [ -z "$OLDEST_STRELKA" -o "$OLDEST_STRELKA" == ".." -o "$OLDEST_STRELKA" == "." ]
then
echo "$(date) - No old files available to clean up in $STRELKA_FILES" >> $LOG
else
OLDEST_STRELKA_DATE=`echo $OLDEST_STRELKA | awk '{print $1}' | cut -d+ -f1`
OLDEST_STRELKA_FILE=`echo $OLDEST_STRELKA | awk '{print $2}'`
echo "$(date) - Removing extracted files for $OLDEST_STRELKA_DATE" >> $LOG
find $STRELKA_FILES -type f -printf '%T+ %p\n' | grep $OLDEST_STRELKA_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
## Clean up Zeek extracted files processed by Strelka
STRELKA_FILES='/nsm/strelka/processed'
OLDEST_STRELKA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_STRELKA" -o "$OLDEST_STRELKA" == ".." -o "$OLDEST_STRELKA" == "." ]; then
echo "$(date) - No old files available to clean up in $STRELKA_FILES" >>$LOG
else
OLDEST_STRELKA_DATE=$(echo $OLDEST_STRELKA | awk '{print $1}' | cut -d+ -f1)
OLDEST_STRELKA_FILE=$(echo $OLDEST_STRELKA | awk '{print $2}')
echo "$(date) - Removing extracted files for $OLDEST_STRELKA_DATE" >>$LOG
find $STRELKA_FILES -type f -printf '%T+ %p\n' | grep $OLDEST_STRELKA_DATE | awk '{print $2}' | while read FILE; do
echo "$(date) - Removing file: $FILE" >>$LOG
rm -f "$FILE"
done
fi
## Clean up Suricata log files
SURICATA_LOGS='/nsm/suricata'
OLDEST_SURICATA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_SURICATA" -o "$OLDEST_SURICATA" == ".." -o "$OLDEST_SURICATA" == "." ]
then
echo "$(date) - No old files available to clean up in $SURICATA_LOGS" >> $LOG
else
OLDEST_SURICATA_DATE=`echo $OLDEST_SURICATA | awk '{print $1}' | cut -d+ -f1`
OLDEST_SURICATA_FILE=`echo $OLDEST_SURICATA | awk '{print $2}'`
echo "$(date) - Removing logs for $OLDEST_SURICATA_DATE" >> $LOG
find $SURICATA_LOGS -type f -printf '%T+ %p\n' | grep $OLDEST_SURICATA_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
## Clean up Suricata log files
SURICATA_LOGS='/nsm/suricata'
OLDEST_SURICATA=$(find $SURICATA_LOGS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [[ -z "$OLDEST_SURICATA" ]] || [[ "$OLDEST_SURICATA" == ".." ]] || [[ "$OLDEST_SURICATA" == "." ]]; then
echo "$(date) - No old files available to clean up in $SURICATA_LOGS" >>$LOG
else
OLDEST_SURICATA_DATE=$(echo $OLDEST_SURICATA | awk '{print $1}' | cut -d+ -f1)
OLDEST_SURICATA_FILE=$(echo $OLDEST_SURICATA | awk '{print $2}')
echo "$(date) - Removing logs for $OLDEST_SURICATA_DATE" >>$LOG
find $SURICATA_LOGS -type f -printf '%T+ %p\n' | grep $OLDEST_SURICATA_DATE | awk '{print $2}' | while read FILE; do
echo "$(date) - Removing file: $FILE" >>$LOG
rm -f "$FILE"
done
fi
## Clean up extracted pcaps from Steno
PCAPS='/nsm/pcapout'
OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]
then
echo "$(date) - No old files available to clean up in $PCAPS" >> $LOG
else
OLDEST_PCAP_DATE=`echo $OLDEST_PCAP | awk '{print $1}' | cut -d+ -f1`
OLDEST_PCAP_FILE=`echo $OLDEST_PCAP | awk '{print $2}'`
echo "$(date) - Removing extracted files for $OLDEST_PCAP_DATE" >> $LOG
find $PCAPS -type f -printf '%T+ %p\n' | grep $OLDEST_PCAP_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
# Clean Wazuh archives
# Slightly different code since we have 2 files to remove (.json and .log)
WAZUH_ARCHIVE='/nsm/wazuh/logs/archives'
OLDEST_WAZUH=$(find $WAZUH_ARCHIVE -type f ! -name "archives.json" -printf "%T+\t%p\n" | sort -n | awk '{print $1}' | head -n 1)
# Make sure we don't delete the current files
find $WAZUH_ARCHIVE -type f ! -name "archives.json" -printf "%T+\t%p\n" | sort -n | awk '{print $2}' | head -n 1 >/tmp/files$$
if [[ $(wc -l </tmp/files$$) -ge 1 ]]; then
echo "$(date) - Removing logs for $OLDEST_WAZUH" >>$LOG
while read -r line; do
echo "$(date) - Removing file: $line" >>$LOG
rm "$line"
done </tmp/files$$
else
echo "$(date) - No old files available to clean up in $WAZUH_ARCHIVE" >>$LOG
fi
rm /tmp/files$$
## Clean up extracted pcaps from Steno
PCAPS='/nsm/pcapout'
OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]; then
echo "$(date) - No old files available to clean up in $PCAPS" >>$LOG
else
OLDEST_PCAP_DATE=$(echo $OLDEST_PCAP | awk '{print $1}' | cut -d+ -f1)
OLDEST_PCAP_FILE=$(echo $OLDEST_PCAP | awk '{print $2}')
echo "$(date) - Removing extracted files for $OLDEST_PCAP_DATE" >>$LOG
find $PCAPS -type f -printf '%T+ %p\n' | grep $OLDEST_PCAP_DATE | awk '{print $2}' | while read FILE; do
echo "$(date) - Removing file: $FILE" >>$LOG
rm -f "$FILE"
done
fi
}
# Check to see if we are already running
IS_RUNNING=$(ps aux | grep "sensor_clean" | grep -v grep | wc -l)
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >> $LOG && exit 0
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ];
do
clean
CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
done
else
echo "$(date) - Current usage value of $CUR_USAGE not greater than CRIT_DISK_USAGE value of $CRIT_DISK_USAGE..." >> $LOG
while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; do
clean
CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
done
fi

View File

@@ -19,18 +19,21 @@
. /usr/sbin/so-common
echo $banner
printf "Starting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
echo $banner
if [ $# -ge 1 ]; then
echo $banner
printf "Starting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
echo $banner
if [ "$2" = "--force" ]
then
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
salt-call saltutil.kill_all_jobs
if [ "$2" = "--force" ]; then
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
salt-call saltutil.kill_all_jobs
fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
else
echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start filebeat, or so-filebeat-start\n"
fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac

View File

@@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- from 'common/maps/so-status.map.jinja' import docker with context %}
{%- set container_list = docker['containers'] | sort %}
{%- set container_list = docker['containers'] | sort | unique %}
if ! [ "$(id -u)" = 0 ]; then
echo "This command must be run as root"
@@ -27,6 +27,7 @@ ERROR_STRING="ERROR"
SUCCESS_STRING="OK"
PENDING_STRING="PENDING"
MISSING_STRING='MISSING'
CALLER=$(ps -o comm= $PPID)
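# The parent process name captured above is used later to select plain, non-colorized output when so-status is invoked by salt-call or salt-minion instead of an interactive terminal.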
declare -a BAD_STATUSES=("removing" "paused" "exited" "dead")
declare -a PENDING_STATUSES=("paused" "created" "restarting")
declare -a GOOD_STATUSES=("running")
@@ -71,9 +72,9 @@ compare_lists() {
# {% endraw %}
create_expected_container_list() {
{% for item in container_list%}
{% for item in container_list -%}
expected_container_list+=("{{ item }}")
{% endfor %}
{% endfor -%}
}
populate_container_lists() {
@@ -93,7 +94,7 @@ populate_container_lists() {
for line in "${docker_raw_list[@]}"; do
container_name="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\1/' )" # Get value in the first search group (container names)
container_state="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\2/' )" # Get value in the second search group (container states)
temp_container_name_list+=( "${container_name}" )
temp_container_state_list+=( "${container_state}" )
done
@@ -149,33 +150,78 @@ print_line() {
printf "%s \n" " ]"
}
main() {
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
non_term_print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} )"
systemctl is-active --quiet docker
if [[ $? = 0 ]]; then
print_line "Docker" "running"
else
print_line "Docker" "exited"
fi
local PADDING_CONSTANT=10
populate_container_lists
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
local num_containers=${#container_name_list[@]}
for i in $(seq 0 $(($num_containers - 1 ))); do
print_line ${container_name_list[$i]} ${container_state_list[$i]}
printf " $service_name "
for i in $(seq 0 $(( 40 - $PADDING_CONSTANT - ${#service_name} - ${#service_state} ))); do
printf "-"
done
printf " [ "
printf "$service_state"
printf "%s \n" " ]"
}
printf "\n"
main() {
# if running from salt
if [ "$CALLER" == 'salt-call' ] || [ "$CALLER" == 'salt-minion' ]; then
printf "\n"
printf "Checking Docker status\n\n"
systemctl is-active --quiet docker
if [[ $? = 0 ]]; then
non_term_print_line "Docker" "running"
else
non_term_print_line "Docker" "exited"
fi
populate_container_lists
printf "\n"
printf "Checking container statuses\n\n"
local num_containers=${#container_name_list[@]}
for i in $(seq 0 $(($num_containers - 1 ))); do
non_term_print_line ${container_name_list[$i]} ${container_state_list[$i]}
done
printf "\n"
# else if running from a terminal
else
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
systemctl is-active --quiet docker
if [[ $? = 0 ]]; then
print_line "Docker" "running"
else
print_line "Docker" "exited"
fi
populate_container_lists
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
local num_containers=${#container_name_list[@]}
for i in $(seq 0 $(($num_containers - 1 ))); do
print_line ${container_name_list[$i]} ${container_state_list[$i]}
done
printf "\n"
fi
}
# {% endraw %}
main
main

View File

@@ -19,11 +19,15 @@
. /usr/sbin/so-common
echo $banner
printf "Stopping $1...\n"
echo $banner
if [ $# -ge 1 ]; then
echo $banner
printf "Stopping $1...\n"
echo $banner
case $1 in
*) docker stop so-$1 ; docker rm so-$1 ;;
esac
case $1 in
*) docker stop so-$1 ; docker rm so-$1 ;;
esac
else
echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop filebeat, or so-filebeat-stop\n"
fi

View File

@@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Usage: so-tcpreplay "/opt/so/samples/*"
# Usage: so-tcpreplay "/opt/samples/*"
REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)

45
salt/common/tools/sbin/so-test Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Usage: so-test
. /usr/sbin/so-common
REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
if [ "$REPLAY_ENABLED" != "" ] && [ "$REPLAY_RUNNING" != "" ]; then
echo
echo "Preparing to replay PCAPs..."
docker cp so-tcpreplay:/opt/samples /opt/samples
docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
echo
echo "PCAP's have been replayed - it is normal to see some warnings."
echo
else
echo "Replay functionality not enabled! Enabling Now...."
echo
echo "Note that you will need internet access to download the appropriate components"
/usr/sbin/so-start tcpreplay
echo "Replay functionality enabled. Replaying PCAPs Now...."
docker cp so-tcpreplay:/opt/samples /opt/samples
docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
echo
echo "PCAP's have been replayed - it is normal to see some warnings."
echo
fi

View File

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <new-user-name>"
echo ""
echo "Adds a new user to TheHive. The new password will be read from STDIN."
exit 1
}
if [ $# -ne 1 ]; then
usage
fi
USER=$1
THEHIVE_KEY=$(lookup_pillar hivekey)
THEHIVE_IP=$(lookup_pillar managerip)
THEHIVE_USER=$USER
# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs THEHIVE_PASS
if ! check_password "$THEHIVE_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
exit 2
fi
# Create new user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to TheHive"
else
echo "Unable to add user to TheHive; user might already exist"
echo $resp
exit 2
fi

View File

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
usage() {
echo "Usage: $0 <user-name> <true|false>"
echo ""
echo "Enables or disables a user in TheHive."
exit 1
}
if [ $# -ne 2 ]; then
usage
fi
USER=$1
THEHIVE_KEY=$(lookup_pillar hivekey)
THEHIVE_IP=$(lookup_pillar managerip)
THEHIVE_USER=$USER
case "${2^^}" in
FALSE | NO | 0)
THEHIVE_STATUS=Locked
;;
TRUE | YES | 1)
THEHIVE_STATUS=Ok
;;
*)
usage
;;
esac
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully updated user in TheHive"
else
echo "Failed to update user in TheHive"
echo "$resp"
exit 2
fi

View File

@@ -8,31 +8,21 @@
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
got_root() {
. /usr/sbin/so-common
# Make sure you are root
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
}
# Make sure the user is root
got_root
if [[ $# < 1 || $# > 2 ]]; then
echo "Usage: $0 <list|add|update|delete|validate|valemail|valpass> [email]"
if [[ $# -lt 1 || $# -gt 2 ]]; then
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
echo ""
echo " list: Lists all user email addresses currently defined in the identity system"
echo " add: Adds a new user to the identity system; requires 'email' parameter"
echo " update: Updates a user's password; requires 'email' parameter"
echo " delete: Deletes an existing user; requires 'email' parameter"
echo " enable: Enables a user; requires 'email' parameter"
echo " disable: Disables a user; requires 'email' parameter"
echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
echo " valpass: Validates that a password is acceptable for defining a new user"
echo ""
echo " Note that the password can be piped into stdin to avoid prompting for it."
echo " Note that the password can be piped into STDIN to avoid prompting for it"
exit 1
fi
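# Illustrative example (hypothetical values): echo 'Sup3rSecret!' | so-user add analyst@example.com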
@@ -74,7 +64,7 @@ function findIdByEmail() {
email=$1
response=$(curl -Ss ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq ".[] | select(.addresses[0].value == \"$email\") | .id")
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId
}
@@ -100,14 +90,16 @@ function validateEmail() {
function updatePassword() {
identityId=$1
# Read password from stdin (show prompt only if no stdin was piped in)
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -s password
if [ -z "$password" ]; then
# Read password from stdin (show prompt only if no stdin was piped in)
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs password
validatePassword "$password"
validatePassword "$password"
fi
if [[ -n $identityId ]]; then
# Generate password hash
@@ -124,7 +116,7 @@ function listUsers() {
response=$(curl -Ss ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
echo "${response}" | jq -r ".[] | .addresses[0].value" | sort
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
}
function createUser() {
@@ -133,17 +125,8 @@ function createUser() {
now=$(date -u +%FT%TZ)
addUserJson=$(cat <<EOF
{
"addresses": [
{
"expires_at": "2099-01-31T12:00:00Z",
"value": "${email}",
"verified": true,
"verified_at": "${now}",
"via": "so-add-user"
}
],
"traits": {"email":"${email}"},
"traits_schema_id": "default"
"schema_id": "default"
}
EOF
)
@@ -163,6 +146,36 @@ EOF
updatePassword $identityId
}
function updateStatus() {
email=$1
status=$2
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
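# Kratos (as used here) has no native enable/disable switch, so the workaround below rewrites the stored credential config: renaming the "hashed" password key to "locked" blocks logins (and active sessions are deleted), while the reverse substitution re-enables the account.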
if [[ "$status" == "locked" ]]; then
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to lock credential record"
echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
else
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to unlock credential record"
fi
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked"
}
function updateUser() {
email=$1
@@ -188,8 +201,11 @@ case "${operation}" in
[[ "$email" == "" ]] && fail "Email address must be provided"
validateEmail "$email"
updatePassword
createUser "$email"
echo "Successfully added new user"
echo "Successfully added new user to SOC"
check_container thehive && echo $password | so-thehive-user-add "$email"
check_container fleet && echo $password | so-fleet-user-add "$email"
;;
"list")
@@ -205,12 +221,34 @@ case "${operation}" in
echo "Successfully updated user"
;;
"enable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
updateStatus "$email" 'active'
echo "Successfully enabled user"
check_container thehive && so-thehive-user-enable "$email" true
check_container fleet && so-fleet-user-enable "$email" true
;;
"disable")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
updateStatus "$email" 'locked'
echo "Successfully disabled user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
;;
"delete")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
deleteUser "$email"
echo "Successfully deleted user"
echo "Successfully deleted user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
;;
"validate")

View File

@@ -0,0 +1,2 @@
#!/bin/bash
so-user disable $*

View File

@@ -0,0 +1,2 @@
#!/bin/bash
so-user enable $*

View File

@@ -0,0 +1,2 @@
#!/bin/bash
so-user list

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if docker ps |grep so-wazuh >/dev/null 2>&1; then
docker exec -it so-wazuh /var/ossec/bin/manage_agents "$@"
else
echo "Wazuh manager is not running. Please start it with so-wazuh-start."
fi

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
if docker ps |grep so-wazuh >/dev/null 2>&1; then
docker exec -it so-wazuh /var/ossec/bin/agent_upgrade "$@"
else
echo "Wazuh manager is not running. Please start it with so-wazuh-start."
fi

View File

@@ -14,10 +14,10 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
clone_dir="/tmp"
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
#mkdir -p $output_dir
mkdir -p $output_dir
repos="$output_dir/repos.txt"
ignorefile="$output_dir/ignore.txt"
@@ -25,8 +25,70 @@ deletecounter=0
newcounter=0
updatecounter=0
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
{% if ISAIRGAP is sameas true %}
clone_dir="/nsm/repo/rules/strelka"
repo_name="signature-base"
mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base
[ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
# Copy over rules
for i in $(find $clone_dir/yara -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')
# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done
# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
done
echo "Done!"
if [ "$newcounter" -gt 0 ];then
echo "$newcounter new rules added."
fi
if [ "$updatecounter" -gt 0 ];then
echo "$updatecounter rules updated."
fi
if [ "$deletecounter" -gt 0 ];then
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
fi
{% else %}
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
clone_dir="/tmp"
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
while IFS= read -r repo; do
@@ -68,7 +130,7 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
fi
fi;
done
# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
@@ -100,3 +162,4 @@ else
echo "No connectivity to Github...exiting..."
exit 1
fi
{%- endif -%}

View File

@@ -2,17 +2,14 @@
local_salt_dir=/opt/so/saltstack/local
zeek_logs_enabled() {
echo "zeeklogs:" > $local_salt_dir/pillar/zeeklogs.sls
echo " enabled:" >> $local_salt_dir/pillar/zeeklogs.sls
for BLOG in ${BLOGS[@]}; do
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/zeeklogs.sls
done
}
whiptail_manager_adv_service_zeeklogs() {
BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
"dce_rpc" "RPC Logs" ON \
@@ -52,7 +49,25 @@ whiptail_manager_adv_service_zeeklogs() {
"mysql" "MySQL Logs" ON \
"socks" "SOCKS Logs" ON \
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
local exitstatus=$?
IFS=' ' read -ra BLOGS <<< "$BLOGS"
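# whiptail returns the selected checklist items as a single space-separated string; the read -ra above splits it into the BLOGS array consumed by zeek_logs_enabled (which strips the surrounding quotes).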
return $exitstatus
}
whiptail_manager_adv_service_zeeklogs
zeek_logs_enabled
return_code=$?
case $return_code in
1)
whiptail --title "Security Onion Setup" --msgbox "Cancelling. No changes have been made." 8 75
;;
255)
whiptail --title "Security Onion Setup" --msgbox "Whiptail error occured, exiting." 8 75
;;
*)
zeek_logs_enabled
;;
esac

View File

@@ -18,22 +18,85 @@
. /usr/sbin/so-common
UPDATE_DIR=/tmp/sogh/securityonion
INSTALLEDVERSION=$(cat /etc/soversion)
default_salt_dir=/opt/so/saltstack/default
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
exec 3>&1 1>${SOUP_LOG} 2>&1
manager_check() {
# Check to see if this is a manager
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then
echo "This is a manager. We can proceed"
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then
echo "This is a manager. We can proceed."
MINIONID=$(salt-call grains.get id --out=txt|awk -F: {'print $2'}|tr -d ' ')
else
echo "Please run soup on the manager. The manager controls all updates."
exit 0
fi
}
airgap_mounted() {
# Let's see if the ISO is already mounted.
if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
echo "The ISO is already mounted"
else
echo ""
echo "Looks like we need access to the upgrade content"
echo ""
echo "If you just copied the .iso file over you can specify the path."
echo "If you burned the ISO to a disk the standard way you can specify the device."
echo "Example: /home/user/securityonion-2.X.0.iso"
echo "Example: /dev/cdrom"
echo ""
read -p 'Enter the location of the iso: ' ISOLOC
if [ -f $ISOLOC ]; then
# Mounting the ISO image
mkdir -p /tmp/soagupdate
mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
# Make sure mounting was successful
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
echo "Something went wrong trying to mount the ISO."
echo "Ensure you verify the ISO that you downloaded."
exit 0
else
echo "ISO has been mounted!"
fi
elif [ -f $ISOLOC/SecurityOnion/VERSION ]; then
ln -s $ISOLOC /tmp/soagupdate
echo "Found the update content"
else
mkdir -p /tmp/soagupdate
mount $ISOLOC /tmp/soagupdate
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
echo "Something went wrong trying to mount the device."
echo "Ensure you verify the ISO that you downloaded."
exit 0
else
echo "Device has been mounted!"
fi
fi
fi
}
check_airgap() {
# See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap | awk '{print $2}')
if [[ "$AIRGAP" == "True" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
else
is_airgap=1
fi
}
clean_dockers() {
# Place Holder for cleaning up old docker images
echo ""
echo "Trying to clean up old dockers."
docker system prune -a -f
}
clone_to_tmp() {
@@ -43,8 +106,11 @@ clone_to_tmp() {
# Make a temp location for the files
mkdir -p /tmp/sogh
cd /tmp/sogh
#git clone -b dev https://github.com/Security-Onion-Solutions/securityonion.git
git clone https://github.com/Security-Onion-Solutions/securityonion.git
SOUP_BRANCH=""
if [ -n "$BRANCH" ]; then
SOUP_BRANCH="-b $BRANCH"
fi
git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git
cd /tmp
if [ ! -f $UPDATE_DIR/VERSION ]; then
echo "Update was unable to pull from github. Please check your internet."
@@ -54,29 +120,207 @@ clone_to_tmp() {
copy_new_files() {
# Copy new files over to the salt dir
cd /tmp/sogh/securityonion
rsync -a salt $default_salt_dir/
rsync -a pillar $default_salt_dir/
chown -R socore:socore $default_salt_dir/
chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/
rsync -a pillar $DEFAULT_SALT_DIR/
chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp
}
detect_os() {
# Detect Base OS
echo "Determining Base OS." >> "$SOUP_LOG" 2>&1
if [ -f /etc/redhat-release ]; then
OS="centos"
elif [ -f /etc/os-release ]; then
OS="ubuntu"
fi
echo "Found OS: $OS" >> "$SOUP_LOG" 2>&1
}
highstate() {
# Run a highstate but first cancel a running one.
salt-call saltutil.kill_all_jobs
salt-call state.highstate
salt-call state.highstate -l info
}
masterlock() {
echo "Locking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
mv -v $TOPFILE $BACKUPTOPFILE
echo "base:" > $TOPFILE
echo " $MINIONID:" >> $TOPFILE
echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE
fi
}
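# Note: masterlock() temporarily swaps in a minimal top file so that, during the rc.1 upgrade, this manager minion only applies the ca, ssl and elasticsearch states; masterunlock() restores the original top.sls afterwards.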
masterunlock() {
echo "Unlocking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
mv -v $BACKUPTOPFILE $TOPFILE
fi
}
playbook() {
echo "Applying playbook settings"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
salt-call state.apply playbook.OLD_db_init
rm -f /opt/so/rules/elastalert/playbook/*.yaml
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
fi
}
pillar_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if pillar changes are needed"
echo "Checking to see if pillar changes are needed."
[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
}
rc1_to_rc2() {
# Move the static file to global.sls
echo "Migrating static.sls to global.sls"
mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
# Moving baseurl from minion sls file to inside global.sls
local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls)
sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls;
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
# Adding play values to the global.sls
local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
# Move storage nodes to hostname for SSL
# Get a list we can use:
grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt
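# The pipeline above is assumed to flatten each "<hostname>_searchnode:" entry and the IP on the following line of nodestab.sls into a single "name ip" row, so the cross-cluster remotes can be re-registered by hostname instead of IP.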
# Remove the nodes from cluster settings
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Removing the old cross cluster config for $NAME"
curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt
INSTALLEDVERSION=rc.2
}
rc2_to_rc3() {
# move location of local.rules
cp /opt/so/saltstack/default/salt/idstools/localrules/local.rules /opt/so/saltstack/local/salt/idstools/local.rules
if [ -f /opt/so/saltstack/local/salt/idstools/localrules/local.rules ]; then
cat /opt/so/saltstack/local/salt/idstools/localrules/local.rules >> /opt/so/saltstack/local/salt/idstools/local.rules
fi
rm -rf /opt/so/saltstack/local/salt/idstools/localrules
rm -rf /opt/so/saltstack/default/salt/idstools/localrules
# Rename mdengine to MDENGINE
sed -i "s/ zeekversion/ mdengine/g" /opt/so/saltstack/local/pillar/global.sls
# Enable Strelka Rules
sed -i "/ rules:/c\ rules: 1" /opt/so/saltstack/local/pillar/global.sls
INSTALLEDVERSION=rc.3
}
rc3_to_2.3.0() {
# Fix Tab Complete
if [ ! -f /etc/profile.d/securityonion.sh ]; then
echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
fi
{
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
} >> /opt/so/saltstack/local/pillar/global.sls
sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
{
echo "playbook_admin: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
echo "playbook_automation: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
} >> /opt/so/saltstack/local/pillar/secrets.sls
}
space_check() {
# Check to see if there is enough space
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
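# df -BG reports free space in GiB with a trailing "G"; the sed above strips that suffix so the value can be compared numerically.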
if [ "$CURRENTSPACE" -lt "10" ]; then
echo "You are low on disk space. Upgrade will try and clean up space.";
clean_dockers
else
echo "Plenty of space for upgrading"
fi
}
unmount_update() {
cd /tmp
umount /tmp/soagupdate
}
update_centos_repo() {
# Update the files in the repo
echo "Syncing new updates to /nsm/repo"
rsync -a $AGDOCKER/repo /nsm/repo
echo "Creating repo"
createrepo /nsm/repo
}
update_dockers() {
if [ $is_airgap -eq 0 ]; then
# Let's copy the tarball
if [ ! -f $AGDOCKER/registry.tar ]; then
echo "Unable to locate registry. Exiting"
exit 0
else
echo "Stopping the registry docker"
docker stop so-dockerregistry
docker rm so-dockerregistry
echo "Copying the new dockers over"
tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker
fi
else
# List all the containers
if [ $MANAGERCHECK != 'so-helix' ]; then
if [ $MANAGERCHECK == 'so-import' ]; then
TRUSTED_CONTAINERS=( \
"so-idstools" \
"so-nginx" \
"so-filebeat" \
"so-suricata" \
"so-soc" \
"so-elasticsearch" \
"so-kibana" \
"so-kratos" \
"so-suricata" \
"so-registry" \
"so-pcaptools" \
"so-zeek" )
elif [ $MANAGERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng" \
"so-thehive-cortex" \
@@ -94,6 +338,7 @@ update_dockers() {
"so-kibana" \
"so-kratos" \
"so-logstash" \
"so-minio" \
"so-mysql" \
"so-nginx" \
"so-pcaptools" \
@@ -102,7 +347,10 @@ update_dockers() {
"so-soc" \
"so-soctopus" \
"so-steno" \
"so-strelka" \
"so-strelka-frontend" \
"so-strelka-manager" \
"so-strelka-backend" \
"so-strelka-filestream" \
"so-suricata" \
"so-telegraf" \
"so-thehive" \
@@ -121,7 +369,7 @@ update_dockers() {
"so-telegraf" \
"so-zeek" )
fi
# Download the containers from the interwebs
for i in "${TRUSTED_CONTAINERS[@]}"
do
@@ -132,14 +380,21 @@ update_dockers() {
docker tag $IMAGEREPO/$i:$NEWVERSION $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION
docker push $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION
done
fi
# Cleanup on Aisle 4
clean_dockers
echo "Add Registry back if airgap"
if [ $is_airgap -eq 0 ]; then
docker load -i $AGDOCKER/registry_image.tar
fi
}
update_version() {
# Update the version to the latest
echo "Updating the version file."
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
}
upgrade_check() {
@@ -148,54 +403,172 @@ upgrade_check() {
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
fi
}
upgrade_check_salt() {
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk '{print $2}')
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION"
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
fi
}
verify_latest_update_script() {
# Check to see if the update scripts match. If not, run the new one.
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum /tmp/sogh/securityonion/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup."
cp $UPDATE_DIR/salt/common/tools/sbin/soup $default_salt_dir/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again"
echo "soup has been updated. Please run soup again."
exit 0
fi
}
echo "Checking to see if this is a manager"
manager_check
echo "Cloning latest code to a temporary location"
clone_to_tmp
main () {
while getopts ":b" opt; do
case "$opt" in
b ) # process option b
shift
BATCHSIZE=$1
if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? ) echo "Usage: cmd [-b]"
;;
esac
done
echo "Checking to see if this is a manager."
echo ""
echo "Verifying we have the latest script"
manager_check
echo "Checking to see if this is an airgap install"
echo ""
check_airgap
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
detect_os
echo ""
if [ $is_airgap -eq 0 ]; then
# Let's mount the ISO since this is airgap
airgap_mounted
else
echo "Cloning Security Onion github repo into $UPDATE_DIR."
clone_to_tmp
fi
echo ""
echo "Verifying we have the latest soup script."
verify_latest_update_script
echo ""
echo "Let's see if we need to update"
echo "Let's see if we need to update Security Onion."
upgrade_check
space_check
echo ""
echo "Making pillar changes"
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
echo "Stopping Salt Minion service."
systemctl stop salt-minion
echo ""
echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
echo "Checking for Salt Master and Minion updates."
upgrade_check_salt
echo "Making pillar changes."
pillar_changes
echo ""
echo "Cleaning up old dockers"
clean_dockers
echo ""
echo "Updating docker to $NEWVERSION"
echo "Updating dockers to $NEWVERSION."
update_dockers
# Only update the repo if this is an airgap install
if [ $is_airgap -eq 0 ]; then
update_centos_repo
fi
echo ""
echo "Copying new code"
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
echo "Running a highstate to complete upgrade"
update_version
echo ""
echo "Locking down Salt Master for upgrade"
masterlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
echo ""
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
highstate
echo ""
echo "Updating version"
update_version
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Stopping Salt Master to remove ACL"
systemctl stop salt-master
masterunlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
highstate
playbook
unmount_update
SALTUPGRADED="True"
if [[ "$SALTUPGRADED" == "True" ]]; then
echo ""
echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion
echo ""
fi
}
main "$@" | tee /dev/fd/3

View File

@@ -1,7 +1,7 @@
#!/bin/bash
{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
@@ -11,7 +11,7 @@
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -33,20 +33,32 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed logstash-, or so- indices that we can delete?
# 2. Are there any closed logstash- or so- indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" > /dev/null; do
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
{% else %}
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
{% endif %}
# We need to determine OLDEST_INDEX.
# First, get the list of closed indices that are prefixed with "logstash-" or "so-".
# For example: logstash-ids-YYYY.MM.DD
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
{% if grains['role'] in ['so-node','so-heavynode'] %}
OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
{% else %}
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
{% endif %}
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
{% else %}
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
{% endif %}
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}

View File

@@ -12,11 +12,11 @@ client:
- {{elasticsearch}}
port: 9200
url_prefix:
use_ssl: False
{% if grains['role'] in ['so-node', 'so-heavynode'] %} use_ssl: True{% else %} use_ssl: False{% endif %}
certificate:
client_cert:
client_key:
ssl_no_validate: False
{% if grains['role'] in ['so-node', 'so-heavynode'] %} ssl_no_validate: True{% else %} ssl_no_validate: False{% endif %}
http_auth:
timeout: 30
master_only: False

View File

@@ -1,5 +1,10 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% if 'curator' in top_states %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
# Curator
@@ -131,3 +136,11 @@ so-curator:
# End Curator Cron Jobs
{% endif %}
{% else %}
curator_state_not_allowed:
test.fail_without_changes:
- name: curator_state_not_allowed
{% endif %}

View File

@@ -1,4 +1,4 @@
{%- set FLEETSETUP = salt['pillar.get']('static:fleetsetup', '0') -%}
{%- set FLEETSETUP = salt['pillar.get']('global:fleetsetup', '0') -%}
{%- if FLEETSETUP != 0 %}
launcherpkg:

View File

@@ -1,3 +1,8 @@
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% if 'docker' in top_states %}
installdocker:
pkg.installed:
- name: docker-ce
@@ -5,4 +10,12 @@ installdocker:
# Make sure Docker is running!
docker:
service.running:
- enable: True
- enable: True
{% else %}
docker_state_not_allowed:
test.fail_without_changes:
- name: docker_state_not_allowed
{% endif %}

View File

@@ -0,0 +1,45 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3']%}
{% for VERSION in OLDVERSIONS %}
remove_images_{{ VERSION }}:
docker_image.absent:
- force: True
- images:
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-acng:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-curator:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet-launcher:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-grafana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-influxdb:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kratos:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-mysql:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-nginx:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-playbook:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soctopus:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-frontend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-manager:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-backend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-filestream:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-telegraf:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}'
{% endfor %}
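For readers more familiar with the Docker CLI than with Salt, the docker_image.absent state above is roughly equivalent to a forced rmi loop; a sketch with placeholder values and an abbreviated image list:
MANAGER=manager; IMAGEREPO=exampleorg    # placeholders for the grain/pillar values
for VERSION in 2.0.0-rc.1 2.0.1-rc.1 2.0.2-rc.1 2.0.3-rc.1 2.1.0-rc.2 2.2.0-rc.3; do
  for IMAGE in so-nginx so-soc so-suricata so-zeek; do   # abbreviated image list
    docker rmi -f "$MANAGER:5000/$IMAGEREPO/$IMAGE:$VERSION" 2>/dev/null || true
  done
done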

View File

@@ -12,8 +12,12 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% if 'domainstats' in top_states %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Create the group
dstatsgroup:
@@ -51,3 +55,11 @@ so-domainstats:
- user: domainstats
- binds:
- /opt/so/log/domainstats:/var/log/domain_stats
{% else %}
domainstats_state_not_allowed:
test.fail_without_changes:
- name: domainstats_state_not_allowed
{% endif %}

View File

@@ -16,12 +16,12 @@ disable_rules_on_error: false
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:
minutes: 1
minutes: 3
# ElastAlert will buffer results from the most recent
# period of time, in case some log sources are not in real time
buffer_time:
minutes: 1
minutes: 10
# The maximum time between queries for ElastAlert to start at the most recently
# run query. When ElastAlert starts, for each rule, it will search elastalert_metadata
@@ -38,7 +38,7 @@ es_host: {{ esip }}
es_port: {{ esport }}
# Sets timeout for connecting to and reading from es_host
es_conn_timeout: 60
es_conn_timeout: 55
# The maximum number of documents that will be downloaded from Elasticsearch in
# a single query. The default is 10,000, and if you expect to get near this number,

View File

@@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter):
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
headers = {"Content-Type": "application/json"}
payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"data": match, "@timestamp": timestamp}
payload = {"rule": { "name": self.rule['play_title'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)

View File

@@ -1,51 +0,0 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
#
es_host: {{es}}
es_port: 9200
name: Suricata-Alert
type: frequency
index: "so-ids-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
query_key: ["rule.uuid","source.ip","destination.ip"]
realert:
days: 1
filter:
- query:
query_string:
query: "event.module: suricata AND rule.severity:(1 OR 2)"
alert: hivealerter
hive_connection:
hive_host: http://{{hivehost}}
hive_port: 9000/thehive
hive_apikey: {{hivekey}}
hive_proxies:
http: ''
https: ''
hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3
status: 'New'
follow: True
hive_observable_data_mapping:
- ip: '{match[source][ip]}'
- ip: '{match[destination][ip]}'

View File

@@ -1,49 +0,0 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
#
es_host: {{es}}
es_port: 9200
name: Wazuh-Alert
type: frequency
index: "so-ossec-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
realert:
days: 1
filter:
- query:
query_string:
query: "event.module: ossec AND rule.level>=8"
alert: hivealerter
hive_connection:
hive_host: http://{{hivehost}}
hive_port: 9000/thehive
hive_apikey: {{hivekey}}
hive_proxies:
http: ''
https: ''
hive_alert_config:
title: '{match[rule][name]}'
type: 'wazuh'
source: 'SecurityOnion'
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=event.module%3A%20ossec%20AND%20rule.id%3A{match[rule][id]}%20%7C%20groupby%20host.name%20rule.name> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))>"
severity: 2
tags: ['{match[rule][id]}','{match[host][name]}']
tlp: 3
status: 'New'
follow: True
hive_observable_data_mapping:
- other: '{match[host][name]}'

View File

@@ -12,9 +12,16 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}
{% if 'elastalert' in top_states %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{%- set MANAGER_URL = salt['pillar.get']('global:url_base', '') %}
{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
@@ -84,14 +91,6 @@ elastasomodulesync:
- group: 933
- makedirs: True
elastarulesync:
file.recurse:
- name: /opt/so/rules/elastalert
- source: salt://elastalert/files/rules/so
- user: 933
- group: 933
- template: jinja
elastaconf:
file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml
@@ -100,6 +99,12 @@ elastaconf:
- group: 933
- template: jinja
wait_for_elasticsearch:
module.run:
- http.wait_for_successful_query:
- url: 'http://{{MANAGER}}:9200/_cat/indices/.kibana*'
- wait_for: 180
so-elastalert:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}
@@ -112,5 +117,16 @@ so-elastalert:
- /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/config/elastalert_config.yaml:ro
- extra_hosts:
- {{MANAGER_URL}}:{{MANAGER_IP}}
- require:
- module: wait_for_elasticsearch
{% endif %}
{% else %}
elastalert_state_not_allowed:
test.fail_without_changes:
- name: elastalert_state_not_allowed
{% endif %}

View File

@@ -5,6 +5,7 @@
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %}
{%- endif %}
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
cluster.name: "{{ ESCLUSTERNAME }}"
network.host: 0.0.0.0
@@ -16,12 +17,30 @@ discovery.zen.minimum_master_nodes: 1
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
transport.bind_host: 0.0.0.0
transport.publish_host: {{ NODEIP }}
transport.publish_host: {{ grains.host }}
transport.publish_port: 9300
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
{%- if FEATURES is sameas true %}
#xpack.security.enabled: false
#xpack.security.http.ssl.enabled: false
#xpack.security.transport.ssl.enabled: false
#xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
#xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
#xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
#xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
#xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
#xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
#xpack.security.transport.ssl.verification_mode: none
#xpack.security.http.ssl.client_authentication: none
#xpack.security.authc:
# anonymous:
# username: anonymous_user
# roles: superuser
# authz_exception: true
{%- endif %}
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
node.name: {{ ESCLUSTERNAME }}
script.max_compilations_rate: 1000/1m

View File

@@ -1,53 +1,8 @@
{
"description" : "beats.common",
"processors" : [
{"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}},
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } },
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } },
{ "set": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } },
{ "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } },
{ "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -21,39 +21,30 @@
"properties": ["ip", "country_iso_code", "country_name", "continent_name", "region_iso_code", "region_name", "city_name", "timezone", "location"]
}
},
{
"split": {
"field": "_index",
"target_field": "index_name_prefix",
"separator": "-"
}
},
{
"date_index_name": {
"field": "@timestamp",
"index_name_prefix": "{{index_name_prefix.0}}-{{index_name_prefix.1}}-",
"date_rounding": "d",
"ignore_failure": true,
"index_name_format": "yyyy.MM.dd"
}
},
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },
{ "set": { "if": "ctx.event?.severity == 4", "field": "event.severity_label", "value": "critical", "override": true } },
{ "rename": { "field": "module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{ "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },
{ "set": { "if": "ctx.event?.severity == 4", "field": "event.severity_label", "value": "critical", "override": true } },
{ "rename": { "field": "fields.category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "fields.module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{ "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{
"remove": {
"field": [ "index_name_prefix", "message2", "type" ],
"ignore_failure": true
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset" ], "ignore_missing": true, "ignore_failure": true } },
{
"date_index_name": {
"field": "@timestamp",
"index_name_prefix": "{{ _index }}-",
"date_rounding": "d",
"ignore_failure": true,
"index_name_format": "yyyy.MM.dd"
}
}
]

View File

@@ -0,0 +1,17 @@
{
"description" : "common.nids",
"processors" : [
{ "convert": { "if": "ctx.rule.uuid != null", "field": "rule.uuid", "type": "integer" } },
{ "set": { "if": "ctx.rule?.uuid < 1000000", "field": "rule.reference", "value": "https://www.snort.org/search?query={{rule.gid}}-{{rule.uuid}}" } },
{ "set": { "if": "ctx.rule?.uuid > 1999999", "field": "rule.reference", "value": "https://doc.emergingthreats.net/{{rule.uuid}}" } },
{ "convert": { "if": "ctx.rule.uuid != null", "field": "rule.uuid", "type": "string" } },
{ "dissect": { "if": "ctx.rule.name != null", "field": "rule.name", "pattern" : "%{rule_type} %{rest_of_rulename} ", "ignore_failure": true } },
{ "set": { "if": "ctx.rule_type == 'GPL'", "field": "rule_ruleset", "value": "Snort GPL" } },
{ "set": { "if": "ctx.rule_type == 'ET'", "field": "rule.ruleset", "value": "Emerging Threats" } },
{ "set": { "if": "ctx.rule.severity == 3", "field": "event.severity", "value": 1, "override": true } },
{ "set": { "if": "ctx.rule.severity == 2", "field": "event.severity", "value": 2, "override": true } },
{ "set": { "if": "ctx.rule.severity == 1", "field": "event.severity", "value": 3, "override": true } },
{ "remove": { "field": ["rule_type", "rest_of_rulename"], "ignore_failure": true } },
{ "pipeline": { "name": "common" } }
]
}
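A short worked example of the rule.reference logic above (the SID and GID values are hypothetical): SIDs below 1,000,000 link to snort.org and SIDs above 1,999,999 link to the Emerging Threats documentation:
gid=1; sid=2010935                       # hypothetical NIDS rule identifiers
if [ "$sid" -lt 1000000 ]; then
  echo "https://www.snort.org/search?query=${gid}-${sid}"
elif [ "$sid" -gt 1999999 ]; then
  echo "https://doc.emergingthreats.net/${sid}"
fi
# -> https://doc.emergingthreats.net/2010935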

View File

@@ -1,17 +0,0 @@
{
"description" : "common_nids",
"processors" : [
{ "convert": { "field": "sid", "type": "integer" } },
{ "set": { "if": "ctx.sid < 1000000", "field": "signature_info", "value": "https://www.snort.org/search?query={{gid}}-{{sid}}" } },
{ "set": { "if": "ctx.sid > 1999999", "field": "signature_info", "value": "https://doc.emergingthreats.net/{{sid}}" } },
{ "remove": { "if": "ctx.sid > 2999999", "field": "signature_info" } },
{ "set": { "if": "ctx.priority == '1'", "field": "severity", "value": "High" } },
{ "set": { "if": "ctx.priority == '2'", "field": "severity", "value": "Medium" } },
{ "set": { "if": "ctx.priority == '3'", "field": "severity", "value": "Low" } },
{ "dissect": { "field": "alert", "pattern" : "%{rule_type} %{category} ", "ignore_failure": true } },
{ "set": { "if": "ctx.rule_type == 'GPL'", "field": "rule_type", "value": "Snort GPL" } },
{ "set": { "if": "ctx.rule_type == 'ET'", "field": "rule_type", "value": "Emerging Threats" } },
{ "lowercase": { "field": "category", "ignore_failure": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -0,0 +1,59 @@
{
"description" : "filterlog",
"processors" : [
{
"dissect": {
"field": "real_message",
"pattern" : "%{rule.uuid},%{rule.sub_uuid},%{firewall.anchor},%{firewall.tracker_id},%{interface.name},%{rule.reason},%{rule.action},%{network.direction},%{ip.version},%{firewall.sub_message}",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{
"dissect": {
"if": "ctx.ip.version == '4'",
"field": "firewall.sub_message",
"pattern" : "%{ip.tos},%{ip.ecn},%{ip.ttl},%{ip.id},%{ip.offset},%{ip.flags},%{network.transport_id},%{network.transport},%{data.length},%{source.ip},%{destination.ip},%{ip_sub_msg}",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{
"dissect": {
"if": "ctx.ip?.version == '6'",
"field": "firewall.sub_message",
"pattern" : "%{network.class},%{network.flow_label},%{network.hop_limit},%{network.transport},%{network.transport_id},%{data.length},%{source.ip},%{destination.ip},%{ip_sub_msg}",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{
"dissect": {
"if": "ctx.network?.transport == 'tcp'",
"field": "ip_sub_msg",
"pattern" : "%{source.port},%{destination.port},%{data.length},%{tcp.flags},",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{
"dissect": {
"if": "ctx.network?.transport == 'udp'",
"field": "ip_sub_msg",
"pattern" : "%{source.port},%{destination.port},%{data.length}",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{
"split": {
"if": "ctx.ip.version =='6' && ctx.network?.transport == 'Options'",
"field": "ip_sub_msg",
"target_field": "ip.options",
"separator" : ",",
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
}
},
{ "set": { "field": "_index", "value": "so-firewall", "override": true } },
{ "set": { "if": "ctx.network?.transport_id == '0'", "field": "network.transport", "value": "icmp", "override": true } },
{"community_id": { "if": "ctx.network?.transport != null", "field":["source.ip","source.port","destination.ip","destination.port","network.transport"],"target_field":"network.community_id"}},
{ "set": { "field": "module", "value": "pfsense", "override": true } },
{ "set": { "field": "dataset", "value": "firewall", "override": true } },
{ "remove": { "field": ["real_message", "ip_sub_msg", "firewall.sub_message"], "ignore_failure": true } }
]
}
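To make the first dissect pattern above concrete, here is a sketch of how a pfSense filterlog message (a made-up example) splits into the named fields; the shell read mirrors the comma-delimited dissect order:
msg='5,,,1000000103,em0,match,block,in,4,0x0,,64,54321,0,DF,6,tcp,60,198.51.100.7,203.0.113.9'
IFS=',' read -r rule_uuid rule_sub_uuid anchor tracker_id iface reason action direction ip_version sub_message <<< "$msg"
echo "rule.uuid=$rule_uuid interface.name=$iface rule.action=$action network.direction=$direction ip.version=$ip_version"
# everything after the ninth comma stays in $sub_message, just as the pipeline keeps it in firewall.sub_message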

View File

@@ -0,0 +1,9 @@
{
"description" : "import.wel",
"processors" : [
{ "remove": { "field": ["event.created","timestamp", "winlog.event_data.UtcTime", "event_record_id"], "ignore_failure": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -2,78 +2,24 @@
"description" : "osquery",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
{ "json": { "field": "message2.columns.data", "target_field": "message2.columns.winlog", "ignore_failure": true } },
{ "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } },
{ "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } },
{ "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } },
{ "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
{ "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{
"script": {
"lang": "painless",
"source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; "
}
},
{ "rename": { "field": "osquery.result.hostIdentifier", "target_field": "osquery.result.host_identifier", "ignore_missing": true } },
{ "rename": { "field": "osquery.result.calendarTime", "target_field": "osquery.result.calendar_time", "ignore_missing": true } },
{ "rename": { "field": "osquery.result.unixTime", "target_field": "osquery.result.unix_time", "ignore_missing": true } },
{ "json": { "field": "message", "target_field": "message3", "ignore_failure": true } },
{ "gsub": { "field": "message3.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
{ "json": { "field": "message3.columns.data", "target_field": "message3.columns.winlog", "ignore_failure": true } },
{ "rename": { "field": "message3.columns.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.uid", "target_field": "user.uid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.gid", "target_field": "user.gid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.shell", "target_field": "user.shell", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.pid", "target_field": "process.pid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.parent", "target_field": "process.ppid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.local_address", "target_field": "local.ip", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.local_port", "target_field": "local.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.remote_address", "target_field": "remote.ip", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.message3.columns?.source != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{message3.columns.source}}", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } },
{ "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.Image", "target_field": "process.executable", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.Description", "target_field": "process.pe.description", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.Product", "target_field": "process.pe.product", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.User", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.parentImage", "target_field": "parent_image_path", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.sourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "remove": { "field": [ "message3"], "ignore_failure": false } },
{ "set": { "field": "event.module", "value": "osquery", "override": false } },
{ "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -1,52 +1,69 @@
{
"description" : "ossec",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.agent", "target_field": "agent", "ignore_missing": true } },
{ "rename": { "field": "message2.data", "target_field": "data", "ignore_missing": true } },
{ "rename": { "field": "message2.decoder", "target_field": "decoder", "ignore_missing": true } },
{ "rename": { "field": "message2.full_log", "target_field": "full_log", "ignore_missing": true } },
{ "rename": { "field": "message2.id", "target_field": "log.id.id", "ignore_missing": true } },
{ "rename": { "field": "message2.location", "target_field": "location", "ignore_missing": true } },
{ "rename": { "field": "message2.manager", "target_field": "manager", "ignore_missing": true } },
{ "rename": { "field": "message2.predecoder", "target_field": "predecoder", "ignore_missing": true } },
{ "rename": { "field": "message2.timestamp", "target_field": "timestamp", "ignore_missing": true } },
{ "rename": { "field": "message2.rule", "target_field": "rule", "ignore_missing": true } },
{ "rename": { "field": "data.command", "target_field": "command", "ignore_missing": true } },
{ "rename": { "field": "data.dstip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.dstport", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.dstuser", "target_field": "user.escalated", "ignore_missing": true } },
{ "rename": { "field": "data.srcip", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "data.srcuser", "target_field": "source.user", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.image", "target_field": "image_path", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.parentImage", "target_field": "parent_image_path", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "data.win.system.eventID", "target_field": "event.code", "ignore_missing": true } },
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
{ "set": { "if": "ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
{ "set": { "if": "ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
{ "set": { "if": "ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
{ "set": { "if": "ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
{ "set": { "if": "ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
{ "set": { "if": "ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
{ "set": { "if": "ctx.rule.level == 7", "field": "rule.category", "value": "\"Bad word\" matching" } },
{ "set": { "if": "ctx.rule.level == 8", "field": "rule.category", "value": "First time seen" } },
{ "set": { "if": "ctx.rule.level == 9", "field": "rule.category", "value": "Error from invalid source" } },
{ "set": { "if": "ctx.rule.level == 10", "field": "rule.category", "value": "Multiple user generated errors" } },
{ "set": { "if": "ctx.rule.level == 11", "field": "rule.category", "value": "Integrity checking warning" } },
{ "set": { "if": "ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
{ "set": { "if": "ctx.rule.level == 13", "field": "rule.category", "value": "Unusal error (high importance)" } },
{ "set": { "if": "ctx.rule.level == 14", "field": "rule.category", "value": "High importance security event" } },
{ "set": { "if": "ctx.rule.level == 15", "field": "rule.category", "value": "Severe attack" } },
{ "append": { "if": "ctx.rule.level != null", "field": "tags", "value": ["alert"] } },
{ "remove": { "field": [ "host", "predecoder", "decoder" ], "ignore_missing": true, "ignore_failure": false } },
{ "pipeline": { "name": "common" } }
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "remove": { "field": [ "agent" ], "ignore_missing": true, "ignore_failure": true } },
{ "rename": { "field": "message2.agent", "target_field": "agent", "ignore_missing": true } },
{ "rename": { "field": "message2.data", "target_field": "data", "ignore_missing": true } },
{ "rename": { "field": "message2.decoder", "target_field": "decoder", "ignore_missing": true } },
{ "rename": { "field": "message2.full_log", "target_field": "log.full", "ignore_missing": true } },
{ "rename": { "field": "message2.id", "target_field": "log.id.id", "ignore_missing": true } },
{ "rename": { "field": "message2.location", "target_field": "log.location", "ignore_missing": true } },
{ "rename": { "field": "message2.manager", "target_field": "manager", "ignore_missing": true } },
{ "rename": { "field": "message2.predecoder", "target_field": "predecoder", "ignore_missing": true } },
{ "rename": { "field": "message2.timestamp", "target_field": "event.timestamp", "ignore_missing": true } },
{ "rename": { "field": "message2.previous_log", "target_field": "log.previous_log", "ignore_missing": true } },
{ "rename": { "field": "message2.previous_output", "target_field": "log.previous_output", "ignore_missing": true } },
{ "rename": { "field": "message2.rule", "target_field": "rule", "ignore_missing": true } },
{ "rename": { "field": "message2.syscheck", "target_field": "host.syscheck", "ignore_missing": true } },
{ "rename": { "field": "data.command", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "data.dstip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.dstport", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.dstuser", "target_field": "user.escalated", "ignore_missing": true } },
{ "rename": { "field": "data.srcip", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "data.process", "target_field": "process", "ignore_missing": true } },
{ "rename": { "field": "data.program", "target_field": "program", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "data.win.system", "target_field": "winlog", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata", "target_field": "winlog.event_data", "ignore_missing": true } },
{ "rename": { "field": "winlog.eventID", "target_field": "winlog.event_id", "ignore_missing": true } },
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "decoder.name", "target_field": "event.dataset", "ignore_missing": true } },
{ "rename": { "field": "rule.description", "target_field": "rule.name", "ignore_missing": true } },
{ "rename": { "field": "rule.id", "target_field": "rule.uuid", "ignore_missing": true } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 7", "field": "rule.category", "value": "\"Bad word\" matching" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 8", "field": "rule.category", "value": "First time seen" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 9", "field": "rule.category", "value": "Error from invalid source" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 10", "field": "rule.category", "value": "Multiple user generated errors" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 11", "field": "rule.category", "value": "Integrity checking warning" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 13", "field": "rule.category", "value": "Unusal error (high importance)" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 14", "field": "rule.category", "value": "High importance security event" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level == 15", "field": "rule.category", "value": "Severe attack" } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level >= 1 && ctx.rule.level <=7", "field": "event.severity", "value": 1, "override": true } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level >= 8 && ctx.rule.level <=11", "field": "event.severity", "value": 2, "override": true } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level >= 12 && ctx.rule.level <=14", "field": "event.severity", "value": 3, "override": true } },
{ "set": { "if": "ctx.rule != null && ctx.rule.level >= 15", "field": "event.severity", "value": 4, "override": true } },
{ "rename": { "field": "rule.id", "target_field": "rule.uuid", "ignore_missing": true } },
{ "remove": { "field": [ "predecoder" ], "ignore_failure": true } },
{ "rename": { "field": "fields.category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "fields.module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "set": { "if": "ctx.containsKey('rule') && ctx.rule != null", "field": "event.dataset", "value": "alert", "override": true } },
{ "pipeline": { "name": "common" } }
]
}
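
A quick way to sanity-check the level-to-category and level-to-severity mappings above without touching live data is the inline _simulate endpoint, which runs ad-hoc processors against a sample document. The sketch below copies just two of the set processors from the pipeline; the sample document values are invented for illustration, and simulating the full pipeline would additionally require the chained common, sysmon, and win.eventlogs pipelines to already exist on the cluster.

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "set": { "if": "ctx.rule != null && ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
      { "set": { "if": "ctx.rule != null && ctx.rule.level >= 12 && ctx.rule.level <= 14", "field": "event.severity", "value": 3, "override": true } }
    ]
  },
  "docs": [
    { "_source": { "rule": { "level": 12 } } }
  ]
}

For a rule.level of 12 the simulated document should come back with rule.category set to "High importance event" and event.severity set to 3.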

View File

@@ -1,59 +0,0 @@
{
"description" : "ossec",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "remove": { "field": [ "agent" ], "ignore_missing": true, "ignore_failure": false } },
{ "rename": { "field": "message2.agent", "target_field": "agent", "ignore_missing": true } },
{ "rename": { "field": "message2.data", "target_field": "data", "ignore_missing": true } },
{ "rename": { "field": "message2.decoder", "target_field": "decoder", "ignore_missing": true } },
{ "rename": { "field": "message2.full_log", "target_field": "log.full", "ignore_missing": true } },
{ "rename": { "field": "message2.id", "target_field": "log.id.id", "ignore_missing": true } },
{ "rename": { "field": "message2.location", "target_field": "log.location", "ignore_missing": true } },
{ "rename": { "field": "message2.manager", "target_field": "manager", "ignore_missing": true } },
{ "rename": { "field": "message2.predecoder", "target_field": "predecoder", "ignore_missing": true } },
{ "rename": { "field": "message2.timestamp", "target_field": "event.timestamp", "ignore_missing": true } },
{ "rename": { "field": "message2.previous_log", "target_field": "log.previous_log", "ignore_missing": true } },
{ "rename": { "field": "message2.previous_output", "target_field": "log.previous_output", "ignore_missing": true } },
{ "rename": { "field": "message2.rule", "target_field": "rule", "ignore_missing": true } },
{ "rename": { "field": "message2.syscheck", "target_field": "host.syscheck", "ignore_missing": true } },
{ "rename": { "field": "data.command", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "data.dstip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.dstport", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.dstuser", "target_field": "user.escalated", "ignore_missing": true } },
{ "rename": { "field": "data.srcip", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.image", "target_field": "image_path", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.parentImage", "target_field": "parent_image_path", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.sourcePort", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "data.win.system.eventID", "target_field": "event.code", "ignore_missing": true } },
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "rule.description", "target_field": "rule.name", "ignore_missing": true } },
{ "set": { "if": "ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
{ "set": { "if": "ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
{ "set": { "if": "ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
{ "set": { "if": "ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
{ "set": { "if": "ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
{ "set": { "if": "ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
{ "set": { "if": "ctx.rule.level == 7", "field": "rule.category", "value": "\"Bad word\" matching" } },
{ "set": { "if": "ctx.rule.level == 8", "field": "rule.category", "value": "First time seen" } },
{ "set": { "if": "ctx.rule.level == 9", "field": "rule.category", "value": "Error from invalid source" } },
{ "set": { "if": "ctx.rule.level == 10", "field": "rule.category", "value": "Multiple user generated errors" } },
{ "set": { "if": "ctx.rule.level == 11", "field": "rule.category", "value": "Integrity checking warning" } },
{ "set": { "if": "ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
{ "set": { "if": "ctx.rule.level == 13", "field": "rule.category", "value": "Unusal error (high importance)" } },
{ "set": { "if": "ctx.rule.level == 14", "field": "rule.category", "value": "High importance security event" } },
{ "set": { "if": "ctx.rule.level == 15", "field": "rule.category", "value": "Severe attack" } },
{ "set": { "if": "ctx.rule.level <= 7", "field": "event.severity", "value": 1, "override": true } },
{ "set": { "if": "ctx.rule.level >= 8 && ctx.rule.level <= 11", "field": "event.severity", "value": 2, "override": true } },
{ "set": { "if": "ctx.rule.level >= 12", "field": "event.severity", "value": 3, "override": true } },
{ "append": { "if": "ctx.rule.level != null", "field": "tags", "value": ["alert"] } },
{ "remove": { "field": [ "predecoder", "decoder" ], "ignore_missing": true, "ignore_failure": false } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -1,25 +0,0 @@
{
"description" : "sguild_nids",
"processors" : [
{
"dissect": {
"field": "message",
"pattern" : "%{} %{} %{} Alert Received: %{} %{priority} %{classification} %{interface} {%{alerttime}} %{} %{} {%{alert}} %{source_ip} %{destination_ip} %{protocol} %{source_port} %{destination_port} %{gid} %{sid} %{rev} ",
"on_failure": [ { "drop" : { } } ]
}
},
{ "set": { "if": "ctx.protocol == '1'", "field": "protocol", "value": "ICMP" } },
{ "set": { "if": "ctx.protocol == '6'", "field": "protocol", "value": "TCP" } },
{ "set": { "if": "ctx.protocol == '17'", "field": "protocol", "value": "UDP" } },
{ "remove": { "if": "ctx.source_ip == '{}'", "field": "source_ip" } },
{ "remove": { "if": "ctx.destination_ip == '{}'", "field": "destination_ip" } },
{ "remove": { "if": "ctx.protocol == '{}'", "field": "protocol" } },
{ "remove": { "if": "ctx.source_port == '{}'", "field": "source_port" } },
{ "remove": { "if": "ctx.destination_port == '{}'", "field": "destination_port" } },
{ "set": { "field": "type", "value": "snort" } },
{ "rename": { "field": "@timestamp", "target_field": "timestamp", "ignore_missing": true } },
{ "date": { "field": "alerttime", "target_field": "@timestamp", "formats": ["yyyy-MM-dd HH:mm:ss"], "ignore_failure": true } },
{ "remove": { "field": "alerttime", "ignore_missing": true } },
{ "pipeline": { "name": "common_nids" } }
]
}
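
The removed sguild_nids pipeline above maps numeric IP protocol values to names and reparses the Sguil alert time into @timestamp. Those two steps can be exercised in isolation with the same inline _simulate approach; the alerttime value below is made up, and the dissect stage and the common_nids handoff are omitted for brevity.

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "set": { "if": "ctx.protocol == '6'", "field": "protocol", "value": "TCP" } },
      { "date": { "field": "alerttime", "target_field": "@timestamp", "formats": ["yyyy-MM-dd HH:mm:ss"], "ignore_failure": true } }
    ]
  },
  "docs": [
    { "_source": { "protocol": "6", "alerttime": "2020-10-19 13:25:46" } }
  ]
}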

View File

@@ -1,21 +0,0 @@
{
"description" : "snort",
"processors" : [
{
"dissect": {
"field": "message",
"pattern" : "[%{gid}:%{sid}:%{rev}] %{alert} [Classification: %{classification}] [Priority: %{priority}]: <%{interface}> {%{protocol}} %{source_ip_port} -> %{destination_ip_port}",
"on_failure": [ { "drop" : { } } ]
}
},
{ "split": { "field": "source_ip_port", "separator": ":", "ignore_failure": true } },
{ "split": { "field": "destination_ip_port", "separator": ":", "ignore_failure": true } },
{ "rename":{ "field": "source_ip_port.1", "target_field": "source_port", "ignore_failure": true } },
{ "rename":{ "field": "destination_ip_port.1", "target_field": "destination_port", "ignore_failure": true } },
{ "rename":{ "field": "source_ip_port.0", "target_field": "source_ip", "ignore_failure": true } },
{ "rename":{ "field": "destination_ip_port.0", "target_field": "destination_ip", "ignore_failure": true } },
{ "remove":{ "field": "source_ip_port", "ignore_failure": true } },
{ "remove":{ "field": "destination_ip_port", "ignore_failure": true } },
{ "pipeline": { "name": "common_nids" } }
]
}
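
The removed snort pipeline relies on a dissect of the fast-alert line followed by a split of the ip:port pairs. A minimal sketch of those processors, again via inline _simulate, is shown below; the sample alert line is fabricated purely to match the dissect pattern, and the final common_nids handoff is omitted because that pipeline has to exist on the cluster. The destination side is handled the same way as the source side shown here.

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "dissect": { "field": "message", "pattern": "[%{gid}:%{sid}:%{rev}] %{alert} [Classification: %{classification}] [Priority: %{priority}]: <%{interface}> {%{protocol}} %{source_ip_port} -> %{destination_ip_port}" } },
      { "split": { "field": "source_ip_port", "separator": ":" } },
      { "rename": { "field": "source_ip_port.0", "target_field": "source_ip" } },
      { "rename": { "field": "source_ip_port.1", "target_field": "source_port" } }
    ]
  },
  "docs": [
    { "_source": { "message": "[1:2100498:7] GPL ATTACK_RESPONSE id check returned root [Classification: Potentially Bad Traffic] [Priority: 2]: <eth0> {TCP} 203.0.113.5:80 -> 10.0.0.12:52134" } }
  ]
}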

View File

@@ -6,7 +6,8 @@
{ "rename": { "field": "message2.scan", "target_field": "scan", "ignore_missing": true } },
{ "rename": { "field": "message2.request", "target_field": "request", "ignore_missing": true } },
{ "rename": { "field": "scan.hash", "target_field": "hash", "ignore_missing": true } },
{ "grok": { "field": "request.attributes.filename", "patterns": ["-%{WORD:log.id.fuid}-"] } },
{ "grok": { "if": "ctx.request?.attributes?.filename != null", "field": "request.attributes.filename", "patterns": ["-%{WORD:log.id.fuid}-"], "ignore_failure": true } },
{ "foreach":
{
"if": "ctx.scan?.exiftool?.keys !=null",
@@ -19,8 +20,29 @@
}
}
},
{ "foreach":
{
"if": "ctx.scan?.yara?.meta !=null",
"field": "scan.yara.meta",
"processor":{
"set": {
"field": "rule.{{_ingest._value.identifier}}",
"value": "{{_ingest._value.value}}"
}
}
}
},
{ "set": { "if": "ctx.scan?.yara?.matches != null", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }},
{ "set": { "if": "ctx.scan?.yara?.matches != null", "field": "dataset", "value": "alert", "override": true }},
{ "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }},
{ "set": { "if": "ctx.rule?.name != null && ctx.rule?.score == null", "field": "event.severity", "value": 3, "override": true } },
{ "convert" : { "if": "ctx.rule?.score != null", "field" : "rule.score","type": "integer"}},
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 0 && ctx.rule?.score <= 49", "field": "event.severity", "value": 1, "override": true } },
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 50 && ctx.rule?.score <=69", "field": "event.severity", "value": 2, "override": true } },
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 70 && ctx.rule?.score <=89", "field": "event.severity", "value": 3, "override": true } },
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 90", "field": "event.severity", "value": 4, "override": true } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }},
{ "remove": { "field": ["host", "path", "message", "scan.exiftool.keys"], "ignore_missing": true } },
{ "remove": { "field": ["host", "path", "message", "scan.exiftool.keys", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}
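
The new foreach processor in this file copies every identifier/value pair from scan.yara.meta onto the rule object, which is what later lets a YARA "score" meta field drive the event.severity bands above. A sketch of just that processor, with an invented meta array, is below; a score of 75 would subsequently be converted to an integer and land in the 70-89 band (event.severity 3).

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "foreach": {
          "if": "ctx.scan?.yara?.meta != null",
          "field": "scan.yara.meta",
          "processor": {
            "set": { "field": "rule.{{_ingest._value.identifier}}", "value": "{{_ingest._value.value}}" }
          }
      } }
    ]
  },
  "docs": [
    { "_source": { "scan": { "yara": { "meta": [ { "identifier": "author", "value": "example analyst" }, { "identifier": "score", "value": "75" } ] } } } }
  ]
}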

Some files were not shown because too many files have changed in this diff.